/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_drv.h"
#include "intel_lspcon.h"
#include "intel_psr.h"
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
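/*
 * 2 FIFOs * 640 * 6 bytes = 7680 bytes = 61440 bits, so the limit above is
 * presumably expressed in bits rather than bytes.
 */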
#define DP_DSC_MIN_SUPPORTED_BPC	8
#define DP_DSC_MAX_SUPPORTED_BPC	10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE		2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0	340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1	400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR	976
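/* i.e. (100 - 2.4)/100 = 0.976, expressed here in units of 1/1000 */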
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 panels, which can have more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
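	 * e.g. for the 162000 entry below: (32 << 22) | 1677722 = 0x819999a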
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)

	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		    DP_LANE_ASSIGNMENT_SHIFT(tc_port);

		MISSING_CASE(lane_info);

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
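	/* e.g. a 148500 kHz (1080p60) stream at 24 bpp needs 148500 * 24 / 8 = 445500 kBytes/s */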
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

intel_dp_max_data_rate(int max_link_clock, int max_lanes)
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
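	 *
	 * e.g. HBR2 on 4 lanes: 540000 kHz * 4 = 2160000 kBytes/s of link bandwidth.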
	return max_link_clock * max_lanes;

intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

static int cnl_max_source_rate(struct intel_dp *intel_dp)
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)

static int icl_max_source_rate(struct intel_dp *intel_dp)
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !intel_dp_is_edp(intel_dp))

intel_dp_set_source_rates(struct intel_dp *intel_dp)
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	static const int hsw_rates[] = {
		162000, 270000, 540000
	static const int g4x_rates[] = {
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
			common_rates[k] = source_rates[i];
		} else if (source_rates[i] < sink_rates[j]) {

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
	for (i = 0; i < len; i++)
		if (rate == rates[i])

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on

	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
		DRM_ERROR("Link Training Unsuccessful\n");

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
		if (mode->vdisplay > fixed_mode->vdisplay)
		target_clock = fixed_mode->clock;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
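	 * e.g. a raw 6.4 value of 0x10A means 16.625 bpp, which truncates to 16
	 * after the shift.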
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    mode->hdisplay) >> 4;
				intel_dp_dsc_get_slice_count(intel_dp,

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
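	/* e.g. src = { 0x12, 0x34 } packs to 0x12340000 (first byte in the MSB) */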
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);

intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

vlv_power_sequencer_kick(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",

	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);

	return ffs(pipes) - 1;

vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	if (WARN_ON(pipe == INVALID_PIPE))

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;

bxt_power_sequencer_idx(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
	return I915_READ(PP_STATUS(pipe)) & PP_ON;

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,

vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     vlv_pipe_check pipe_check)
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))

		if (!pipe_check(dev_priv, pipe))

vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))

	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
			intel_dp->pps_pipe = INVALID_PIPE;

struct pps_registers {

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
		regs->pp_div = PP_DIVISOR(pps_idx);

_pp_ctrl_reg(struct intel_dp *intel_dp)
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

_pp_stat_reg(struct intel_dp *intel_dp)
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);

static bool edp_have_panel_power(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;

intel_dp_check_edp(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));

intel_dp_aux_wait_done(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

		DRM_ERROR("dp aux hw did not signal timeout!\n");

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
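	 * (e.g. a hypothetical 100 MHz hrawclk, i.e. rawclk_freq == 100000, would
	 * give a divider of 50, i.e. the intended ~2 MHz AUX clock reference).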
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */

	return ilk_get_aux_clock_divider(intel_dp, index);

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	return index ? 0 : 1;

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				u32 aux_clock_divider)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	intel_wakeref_t wakeref;
	int i, ret, recv_bytes;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	wakeref = pps_lock(intel_dp);

	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			last_status = status;

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);

			if (status & DP_AUX_CH_CTL_DONE)

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, wakeref);

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
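	/*
	 * e.g. a native AUX read (request 0x9) of one byte at DPCD address
	 * 0x000000 yields the header { 0x90, 0x00, 0x00, 0x00 }.
	 */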
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))

		WARN_ON(!msg->buffer != !msg->size);

			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
			msg->reply = rxbuf[0] >> 4;

			/* Number of bytes written in a short write. */
			ret = clamp_t(int, rxbuf[1], 0, msg->size);

				/* Return payload size. */

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
			msg->reply = rxbuf[0] >> 4;
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 * Return payload size.
			memcpy(msg->buffer, rxbuf + 1, ret);

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_CTL(aux_ch);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_DATA(aux_ch, index);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_CTL(aux_ch);
		return PCH_DP_AUX_CH_CTL(aux_ch);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_DATA(aux_ch, index);
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_CTL(aux_ch);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

		return DP_AUX_CH_DATA(aux_ch, index);
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);

intel_dp_aux_fini(struct intel_dp *intel_dp)
	kfree(intel_dp->aux.name);

intel_dp_aux_init(struct intel_dp *intel_dp)
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;

intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;

	if (IS_G4X(dev_priv)) {
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		count = ARRAY_SIZE(vlv_dpll);

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

static void intel_dp_print_rates(struct intel_dp *intel_dp)
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);

intel_dp_max_link_rate(struct intel_dp *intel_dp)
	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))

	return intel_dp->common_rates[len - 1];

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
			intel_dp_rate_select(intel_dp, port_clock);
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
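		/* e.g. port_clock 270000 maps to DP_LINK_BW_2_7 (0x0a) */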
1733 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1734 const struct intel_crtc_state *pipe_config)
1736 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1738 return INTEL_GEN(dev_priv) >= 11 &&
1739 pipe_config->cpu_transcoder != TRANSCODER_A;
1742 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1743 const struct intel_crtc_state *pipe_config)
1745 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1746 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1749 static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1750 const struct intel_crtc_state *pipe_config)
1752 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1754 return INTEL_GEN(dev_priv) >= 10 &&
1755 pipe_config->cpu_transcoder != TRANSCODER_A;
1758 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1759 const struct intel_crtc_state *pipe_config)
1761 if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1764 return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1765 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1768 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1769 struct intel_crtc_state *pipe_config)
1771 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1772 struct intel_connector *intel_connector = intel_dp->attached_connector;
1775 bpp = pipe_config->pipe_bpp;
1776 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1779 bpp = min(bpp, 3*bpc);
1781 if (intel_dp_is_edp(intel_dp)) {
1782 /* Get bpp from vbt only for panels that dont have bpp in edid */
1783 if (intel_connector->base.display_info.bpc == 0 &&
1784 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1785 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1786 dev_priv->vbt.edp.bpp);
1787 bpp = dev_priv->vbt.edp.bpp;
1794 /* Adjust link config limits based on compliance test requests. */
1796 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1797 struct intel_crtc_state *pipe_config,
1798 struct link_config_limits *limits)
1800 /* For DP Compliance we override the computed bpp for the pipe */
1801 if (intel_dp->compliance.test_data.bpc != 0) {
1802 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1804 limits->min_bpp = limits->max_bpp = bpp;
1805 pipe_config->dither_force_disable = bpp == 6 * 3;
1807 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1810 /* Use values requested by Compliance Test Request */
1811 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1814 /* Validate the compliance test data since max values
1815 * might have changed due to link train fallback.
1817 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1818 intel_dp->compliance.test_lane_count)) {
1819 index = intel_dp_rate_index(intel_dp->common_rates,
1820 intel_dp->num_common_rates,
1821 intel_dp->compliance.test_link_rate);
1823 limits->min_clock = limits->max_clock = index;
1824 limits->min_lane_count = limits->max_lane_count =
1825 intel_dp->compliance.test_lane_count;
1830 /* Optimize link config in order: max bpp, min clock, min lanes */
1832 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1833 struct intel_crtc_state *pipe_config,
1834 const struct link_config_limits *limits)
1836 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1837 int bpp, clock, lane_count;
1838 int mode_rate, link_clock, link_avail;
1840 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1841 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1844 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1845 for (lane_count = limits->min_lane_count;
1846 lane_count <= limits->max_lane_count;
1848 link_clock = intel_dp->common_rates[clock];
1849 link_avail = intel_dp_max_data_rate(link_clock,
1852 if (mode_rate <= link_avail) {
1853 pipe_config->lane_count = lane_count;
1854 pipe_config->pipe_bpp = bpp;
1855 pipe_config->port_clock = link_clock;
1866 /* Optimize link config in order: max bpp, min lanes, min clock */
1868 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1869 struct intel_crtc_state *pipe_config,
1870 const struct link_config_limits *limits)
1872 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1873 int bpp, clock, lane_count;
1874 int mode_rate, link_clock, link_avail;
1876 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1877 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1880 for (lane_count = limits->min_lane_count;
1881 lane_count <= limits->max_lane_count;
1883 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1884 link_clock = intel_dp->common_rates[clock];
1885 link_avail = intel_dp_max_data_rate(link_clock,
1888 if (mode_rate <= link_avail) {
1889 pipe_config->lane_count = lane_count;
1890 pipe_config->pipe_bpp = bpp;
1891 pipe_config->port_clock = link_clock;
1902 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1905 u8 dsc_bpc[3] = {0};
1907 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1909 for (i = 0; i < num_bpc; i++) {
1910 if (dsc_max_bpc >= dsc_bpc[i])
1911 return dsc_bpc[i] * 3;
1917 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1918 struct intel_crtc_state *pipe_config,
1919 struct drm_connector_state *conn_state,
1920 struct link_config_limits *limits)
1922 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1923 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1924 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1929 if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1932 dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
1933 conn_state->max_requested_bpc);
1935 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1936 if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
1937 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
1942 * For now enable DSC for max bpp, max link rate, max lane count.
1943 * Optimize this later for the minimum possible link rate/lane count
1944 * with DSC enabled for the requested mode.
1946 pipe_config->pipe_bpp = pipe_bpp;
1947 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1948 pipe_config->lane_count = limits->max_lane_count;
1950 if (intel_dp_is_edp(intel_dp)) {
1951 pipe_config->dsc_params.compressed_bpp =
1952 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1953 pipe_config->pipe_bpp);
1954 pipe_config->dsc_params.slice_count =
1955 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1958 u16 dsc_max_output_bpp;
1959 u8 dsc_dp_slice_count;
1961 dsc_max_output_bpp =
1962 intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
1963 pipe_config->lane_count,
1964 adjusted_mode->crtc_clock,
1965 adjusted_mode->crtc_hdisplay);
1966 dsc_dp_slice_count =
1967 intel_dp_dsc_get_slice_count(intel_dp,
1968 adjusted_mode->crtc_clock,
1969 adjusted_mode->crtc_hdisplay);
1970 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1971 DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
1974 pipe_config->dsc_params.compressed_bpp = min_t(u16,
1975 dsc_max_output_bpp >> 4,
1976 pipe_config->pipe_bpp);
1977 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
1980 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
1981 * is greater than the maximum Cdclock and if slice count is even
1982 * then we need to use 2 VDSC instances.
1984 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
1985 if (pipe_config->dsc_params.slice_count > 1) {
1986 pipe_config->dsc_params.dsc_split = true;
1988 DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
1993 ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
1995 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
1996 "Compressed BPP = %d\n",
1997 pipe_config->pipe_bpp,
1998 pipe_config->dsc_params.compressed_bpp);
2002 pipe_config->dsc_params.compression_enable = true;
2003 DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
2004 "Compressed Bpp = %d Slice Count = %d\n",
2005 pipe_config->pipe_bpp,
2006 pipe_config->dsc_params.compressed_bpp,
2007 pipe_config->dsc_params.slice_count);
2013 intel_dp_compute_link_config(struct intel_encoder *encoder,
2014 struct intel_crtc_state *pipe_config,
2015 struct drm_connector_state *conn_state)
2017 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2018 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2019 struct link_config_limits limits;
2023 common_len = intel_dp_common_len_rate_limit(intel_dp,
2024 intel_dp->max_link_rate);
2026 /* No common link rates between source and sink */
2027 WARN_ON(common_len <= 0);
2029 limits.min_clock = 0;
2030 limits.max_clock = common_len - 1;
2032 limits.min_lane_count = 1;
2033 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2035 limits.min_bpp = 6 * 3;
2036 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2038 if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
2040 * Use the maximum clock and number of lanes the eDP panel
2041 * advertizes being capable of. The eDP 1.3 and earlier panels
2042 * are generally designed to support only a single clock and
2043 * lane configuration, and typically these values correspond to
2044 * the native resolution of the panel. With eDP 1.4 rate select
2045 * and DSC, this is decreasingly the case, and we need to be
2046 * able to select less than maximum link config.
2048 limits.min_lane_count = limits.max_lane_count;
2049 limits.min_clock = limits.max_clock;
2052 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2054 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2055 "max rate %d max bpp %d pixel clock %iKHz\n",
2056 limits.max_lane_count,
2057 intel_dp->common_rates[limits.max_clock],
2058 limits.max_bpp, adjusted_mode->crtc_clock);
2060 if (intel_dp_is_edp(intel_dp))
2062 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
2063 * section A.1: "It is recommended that the minimum number of
2064 * lanes be used, using the minimum link rate allowed for that
2065 * lane configuration."
2067 * Note that we use the max clock and lane count for eDP 1.3 and
2068 * earlier, and fast vs. wide is irrelevant.
2070 ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
2073 /* Optimize for slow and wide. */
2074 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
2077 /* enable compression if the mode doesn't fit available BW */
2078 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2079 if (ret || intel_dp->force_dsc_en) {
2080 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2081 conn_state, &limits);
2086 if (pipe_config->dsc_params.compression_enable) {
2087 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2088 pipe_config->lane_count, pipe_config->port_clock,
2089 pipe_config->pipe_bpp,
2090 pipe_config->dsc_params.compressed_bpp);
2092 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2093 intel_dp_link_required(adjusted_mode->crtc_clock,
2094 pipe_config->dsc_params.compressed_bpp),
2095 intel_dp_max_data_rate(pipe_config->port_clock,
2096 pipe_config->lane_count));
2098 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2099 pipe_config->lane_count, pipe_config->port_clock,
2100 pipe_config->pipe_bpp);
2102 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2103 intel_dp_link_required(adjusted_mode->crtc_clock,
2104 pipe_config->pipe_bpp),
2105 intel_dp_max_data_rate(pipe_config->port_clock,
2106 pipe_config->lane_count));
2111 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2112 const struct drm_connector_state *conn_state)
2114 const struct intel_digital_connector_state *intel_conn_state =
2115 to_intel_digital_connector_state(conn_state);
2116 const struct drm_display_mode *adjusted_mode =
2117 &crtc_state->base.adjusted_mode;
2119 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2122 * CEA-861-E - 5.1 Default Encoding Parameters
2123 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2125 return crtc_state->pipe_bpp != 18 &&
2126 drm_default_rgb_quant_range(adjusted_mode) ==
2127 HDMI_QUANTIZATION_RANGE_LIMITED;
2129 return intel_conn_state->broadcast_rgb ==
2130 INTEL_BROADCAST_RGB_LIMITED;
2135 intel_dp_compute_config(struct intel_encoder *encoder,
2136 struct intel_crtc_state *pipe_config,
2137 struct drm_connector_state *conn_state)
2139 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2140 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2141 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2142 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2143 enum port port = encoder->port;
2144 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2145 struct intel_connector *intel_connector = intel_dp->attached_connector;
2146 struct intel_digital_connector_state *intel_conn_state =
2147 to_intel_digital_connector_state(conn_state);
2148 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2149 DP_DPCD_QUIRK_CONSTANT_N);
2152 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2153 pipe_config->has_pch_encoder = true;
2155 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2157 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2159 pipe_config->has_drrs = false;
2160 if (IS_G4X(dev_priv) || port == PORT_A)
2161 pipe_config->has_audio = false;
2162 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2163 pipe_config->has_audio = intel_dp->has_audio;
2165 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2167 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2168 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2171 if (INTEL_GEN(dev_priv) >= 9) {
2172 ret = skl_update_scaler_crtc(pipe_config);
2177 if (HAS_GMCH(dev_priv))
2178 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2179 conn_state->scaling_mode);
2181 intel_pch_panel_fitting(intel_crtc, pipe_config,
2182 conn_state->scaling_mode);
2185 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2188 if (HAS_GMCH(dev_priv) &&
2189 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2192 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2195 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2196 intel_dp_supports_fec(intel_dp, pipe_config);
2198 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2202 pipe_config->limited_color_range =
2203 intel_dp_limited_color_range(pipe_config, conn_state);
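/*
 * The data M/N pair computed below encodes the ratio of the (possibly
 * compressed) stream bandwidth, pixel clock * bpp, to the total link
 * bandwidth, port clock * lane count; the hardware uses it to pace
 * pixel data onto the link.
 */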
2205 if (!pipe_config->dsc_params.compression_enable)
2206 intel_link_compute_m_n(pipe_config->pipe_bpp,
2207 pipe_config->lane_count,
2208 adjusted_mode->crtc_clock,
2209 pipe_config->port_clock,
2210 &pipe_config->dp_m_n,
2213 intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
2214 pipe_config->lane_count,
2215 adjusted_mode->crtc_clock,
2216 pipe_config->port_clock,
2217 &pipe_config->dp_m_n,
2220 if (intel_connector->panel.downclock_mode != NULL &&
2221 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2222 pipe_config->has_drrs = true;
2223 intel_link_compute_m_n(pipe_config->pipe_bpp,
2224 pipe_config->lane_count,
2225 intel_connector->panel.downclock_mode->clock,
2226 pipe_config->port_clock,
2227 &pipe_config->dp_m2_n2,
2231 if (!HAS_DDI(dev_priv))
2232 intel_dp_set_clock(encoder, pipe_config);
2234 intel_psr_compute_config(intel_dp, pipe_config);
2239 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2240 int link_rate, u8 lane_count,
2243 intel_dp->link_trained = false;
2244 intel_dp->link_rate = link_rate;
2245 intel_dp->lane_count = lane_count;
2246 intel_dp->link_mst = link_mst;
2249 static void intel_dp_prepare(struct intel_encoder *encoder,
2250 const struct intel_crtc_state *pipe_config)
2252 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2253 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2254 enum port port = encoder->port;
2255 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2256 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2258 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2259 pipe_config->lane_count,
2260 intel_crtc_has_type(pipe_config,
2261 INTEL_OUTPUT_DP_MST));
2264 * There are four kinds of DP registers:
 *
 *	IBX PCH
 *	SNB CPU
 *	IVB CPU
 *	CPT PCH
 *
2271 * IBX PCH and CPU are the same for almost everything,
2272 * except that the CPU DP PLL is configured in this register.
2275 * CPT PCH is quite different, having many bits moved
2276 * to the TRANS_DP_CTL register instead. That
2277 * configuration happens (oddly) in ironlake_pch_enable
2280 /* Preserve the BIOS-computed detected bit. This is
2281 * supposed to be read-only.
2283 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2285 /* Handle DP bits in common between all three register formats */
2286 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2287 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2289 /* Split out the IBX/CPU vs CPT settings */
2291 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2292 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2293 intel_dp->DP |= DP_SYNC_HS_HIGH;
2294 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2295 intel_dp->DP |= DP_SYNC_VS_HIGH;
2296 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2298 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2299 intel_dp->DP |= DP_ENHANCED_FRAMING;
2301 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2302 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2305 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2307 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2308 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2309 trans_dp |= TRANS_DP_ENH_FRAMING;
2311 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2312 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2314 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2315 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2317 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2318 intel_dp->DP |= DP_SYNC_HS_HIGH;
2319 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2320 intel_dp->DP |= DP_SYNC_VS_HIGH;
2321 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2323 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2324 intel_dp->DP |= DP_ENHANCED_FRAMING;
2326 if (IS_CHERRYVIEW(dev_priv))
2327 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2329 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2333 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2334 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2336 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2337 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2339 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2340 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
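/*
 * The mask/value pairs above are matched against PP_STATUS by
 * wait_panel_status(): panel fully on with the sequencer idle, panel
 * fully off, and the power-off/power-cycle delay completed,
 * respectively.
 */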
2342 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2344 static void wait_panel_status(struct intel_dp *intel_dp,
2348 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2349 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2351 lockdep_assert_held(&dev_priv->pps_mutex);
2353 intel_pps_verify_state(intel_dp);
2355 pp_stat_reg = _pp_stat_reg(intel_dp);
2356 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2358 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2360 I915_READ(pp_stat_reg),
2361 I915_READ(pp_ctrl_reg));
2363 if (intel_wait_for_register(&dev_priv->uncore,
2364 pp_stat_reg, mask, value,
2366 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2367 I915_READ(pp_stat_reg),
2368 I915_READ(pp_ctrl_reg));
2370 DRM_DEBUG_KMS("Wait complete\n");
2373 static void wait_panel_on(struct intel_dp *intel_dp)
2375 DRM_DEBUG_KMS("Wait for panel power on\n");
2376 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2379 static void wait_panel_off(struct intel_dp *intel_dp)
2381 DRM_DEBUG_KMS("Wait for panel power off time\n");
2382 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2385 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2387 ktime_t panel_power_on_time;
2388 s64 panel_power_off_duration;
2390 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2392 /* take the difference of current time and panel power off time
2393 * and then make the panel wait for t11_t12 if needed. */
2394 panel_power_on_time = ktime_get_boottime();
2395 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2397 /* When we disable the VDD override bit last we have to do the manual wait. */
2399 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2400 wait_remaining_ms_from_jiffies(jiffies,
2401 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2403 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2406 static void wait_backlight_on(struct intel_dp *intel_dp)
2408 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2409 intel_dp->backlight_on_delay);
2412 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2414 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2415 intel_dp->backlight_off_delay);
2418 /* Read the current pp_control value, unlocking the register if it is locked. */
2422 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2424 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2427 lockdep_assert_held(&dev_priv->pps_mutex);
2429 control = I915_READ(_pp_ctrl_reg(intel_dp));
2430 if (WARN_ON(!HAS_DDI(dev_priv) &&
2431 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2432 control &= ~PANEL_UNLOCK_MASK;
2433 control |= PANEL_UNLOCK_REGS;
2439 * Must be paired with edp_panel_vdd_off().
2440 * Must hold pps_mutex around the whole on/off sequence.
2441 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2443 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2445 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2446 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2448 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2449 bool need_to_disable = !intel_dp->want_panel_vdd;
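/*
 * The return value tells the caller whether VDD was not already
 * requested before this call, i.e. whether it now owes a matching
 * edp_panel_vdd_off().
 */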
2451 lockdep_assert_held(&dev_priv->pps_mutex);
2453 if (!intel_dp_is_edp(intel_dp))
2456 cancel_delayed_work(&intel_dp->panel_vdd_work);
2457 intel_dp->want_panel_vdd = true;
2459 if (edp_have_panel_vdd(intel_dp))
2460 return need_to_disable;
2462 intel_display_power_get(dev_priv,
2463 intel_aux_power_domain(intel_dig_port));
2465 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2466 port_name(intel_dig_port->base.port));
2468 if (!edp_have_panel_power(intel_dp))
2469 wait_panel_power_cycle(intel_dp);
2471 pp = ironlake_get_pp_control(intel_dp);
2472 pp |= EDP_FORCE_VDD;
2474 pp_stat_reg = _pp_stat_reg(intel_dp);
2475 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2477 I915_WRITE(pp_ctrl_reg, pp);
2478 POSTING_READ(pp_ctrl_reg);
2479 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2480 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2482 * If the panel wasn't on, delay before accessing aux channel
2484 if (!edp_have_panel_power(intel_dp)) {
2485 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2486 port_name(intel_dig_port->base.port));
2487 msleep(intel_dp->panel_power_up_delay);
2490 return need_to_disable;
2494 * Must be paired with intel_edp_panel_vdd_off() or
2495 * intel_edp_panel_off().
2496 * Nested calls to these functions are not allowed since
2497 * we drop the lock. Caller must use some higher level
2498 * locking to prevent nested calls from other threads.
2500 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2502 intel_wakeref_t wakeref;
2505 if (!intel_dp_is_edp(intel_dp))
2509 with_pps_lock(intel_dp, wakeref)
2510 vdd = edp_panel_vdd_on(intel_dp);
2511 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2512 port_name(dp_to_dig_port(intel_dp)->base.port));
2515 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2517 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2518 struct intel_digital_port *intel_dig_port =
2519 dp_to_dig_port(intel_dp);
2521 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2523 lockdep_assert_held(&dev_priv->pps_mutex);
2525 WARN_ON(intel_dp->want_panel_vdd);
2527 if (!edp_have_panel_vdd(intel_dp))
2530 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2531 port_name(intel_dig_port->base.port));
2533 pp = ironlake_get_pp_control(intel_dp);
2534 pp &= ~EDP_FORCE_VDD;
2536 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2537 pp_stat_reg = _pp_stat_reg(intel_dp);
2539 I915_WRITE(pp_ctrl_reg, pp);
2540 POSTING_READ(pp_ctrl_reg);
2542 /* Make sure sequencer is idle before allowing subsequent activity */
2543 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2544 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2546 if ((pp & PANEL_POWER_ON) == 0)
2547 intel_dp->panel_power_off_time = ktime_get_boottime();
2549 intel_display_power_put_unchecked(dev_priv,
2550 intel_aux_power_domain(intel_dig_port));
2553 static void edp_panel_vdd_work(struct work_struct *__work)
2555 struct intel_dp *intel_dp =
2556 container_of(to_delayed_work(__work),
2557 struct intel_dp, panel_vdd_work);
2558 intel_wakeref_t wakeref;
2560 with_pps_lock(intel_dp, wakeref) {
2561 if (!intel_dp->want_panel_vdd)
2562 edp_panel_vdd_off_sync(intel_dp);
2566 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2568 unsigned long delay;
2571 * Queue the timer to fire a long time from now (relative to the power
2572 * down delay) to keep the panel power up across a sequence of operations.
2575 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2576 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
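/*
 * An overly long delay is harmless: edp_panel_vdd_work() only drops VDD
 * if want_panel_vdd is still false when it finally runs.
 */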
2580 * Must be paired with edp_panel_vdd_on().
2581 * Must hold pps_mutex around the whole on/off sequence.
2582 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2584 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2586 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2588 lockdep_assert_held(&dev_priv->pps_mutex);
2590 if (!intel_dp_is_edp(intel_dp))
2593 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2594 port_name(dp_to_dig_port(intel_dp)->base.port));
2596 intel_dp->want_panel_vdd = false;
2599 edp_panel_vdd_off_sync(intel_dp);
2601 edp_panel_vdd_schedule_off(intel_dp);
2604 static void edp_panel_on(struct intel_dp *intel_dp)
2606 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2608 i915_reg_t pp_ctrl_reg;
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2612 if (!intel_dp_is_edp(intel_dp))
2615 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2616 port_name(dp_to_dig_port(intel_dp)->base.port));
2618 if (WARN(edp_have_panel_power(intel_dp),
2619 "eDP port %c panel power already on\n",
2620 port_name(dp_to_dig_port(intel_dp)->base.port)))
2623 wait_panel_power_cycle(intel_dp);
2625 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2626 pp = ironlake_get_pp_control(intel_dp);
2627 if (IS_GEN(dev_priv, 5)) {
2628 /* ILK workaround: disable reset around power sequence */
2629 pp &= ~PANEL_POWER_RESET;
2630 I915_WRITE(pp_ctrl_reg, pp);
2631 POSTING_READ(pp_ctrl_reg);
2634 pp |= PANEL_POWER_ON;
2635 if (!IS_GEN(dev_priv, 5))
2636 pp |= PANEL_POWER_RESET;
2638 I915_WRITE(pp_ctrl_reg, pp);
2639 POSTING_READ(pp_ctrl_reg);
2641 wait_panel_on(intel_dp);
2642 intel_dp->last_power_on = jiffies;
2644 if (IS_GEN(dev_priv, 5)) {
2645 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2646 I915_WRITE(pp_ctrl_reg, pp);
2647 POSTING_READ(pp_ctrl_reg);
2651 void intel_edp_panel_on(struct intel_dp *intel_dp)
2653 intel_wakeref_t wakeref;
2655 if (!intel_dp_is_edp(intel_dp))
2658 with_pps_lock(intel_dp, wakeref)
2659 edp_panel_on(intel_dp);
2663 static void edp_panel_off(struct intel_dp *intel_dp)
2665 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2666 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2668 i915_reg_t pp_ctrl_reg;
2670 lockdep_assert_held(&dev_priv->pps_mutex);
2672 if (!intel_dp_is_edp(intel_dp))
2675 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2676 port_name(dig_port->base.port));
2678 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2679 port_name(dig_port->base.port));
2681 pp = ironlake_get_pp_control(intel_dp);
2682 /* We need to switch off panel power _and_ force vdd, for otherwise some
2683 * panels get very unhappy and cease to work. */
2684 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2687 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2689 intel_dp->want_panel_vdd = false;
2691 I915_WRITE(pp_ctrl_reg, pp);
2692 POSTING_READ(pp_ctrl_reg);
2694 wait_panel_off(intel_dp);
2695 intel_dp->panel_power_off_time = ktime_get_boottime();
2697 /* We got a reference when we enabled the VDD. */
2698 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2701 void intel_edp_panel_off(struct intel_dp *intel_dp)
2703 intel_wakeref_t wakeref;
2705 if (!intel_dp_is_edp(intel_dp))
2708 with_pps_lock(intel_dp, wakeref)
2709 edp_panel_off(intel_dp);
2712 /* Enable backlight in the panel power control. */
2713 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2715 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2716 intel_wakeref_t wakeref;
2719 * If we enable the backlight right away following a panel power
2720 * on, we may see slight flicker as the panel syncs with the eDP
2721 * link. So delay a bit to make sure the image is solid before
2722 * allowing it to appear.
2724 wait_backlight_on(intel_dp);
2726 with_pps_lock(intel_dp, wakeref) {
2727 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2730 pp = ironlake_get_pp_control(intel_dp);
2731 pp |= EDP_BLC_ENABLE;
2733 I915_WRITE(pp_ctrl_reg, pp);
2734 POSTING_READ(pp_ctrl_reg);
2738 /* Enable backlight PWM and backlight PP control. */
2739 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2740 const struct drm_connector_state *conn_state)
2742 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2744 if (!intel_dp_is_edp(intel_dp))
2747 DRM_DEBUG_KMS("\n");
2749 intel_panel_enable_backlight(crtc_state, conn_state);
2750 _intel_edp_backlight_on(intel_dp);
2753 /* Disable backlight in the panel power control. */
2754 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2756 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2757 intel_wakeref_t wakeref;
2759 if (!intel_dp_is_edp(intel_dp))
2762 with_pps_lock(intel_dp, wakeref) {
2763 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2766 pp = ironlake_get_pp_control(intel_dp);
2767 pp &= ~EDP_BLC_ENABLE;
2769 I915_WRITE(pp_ctrl_reg, pp);
2770 POSTING_READ(pp_ctrl_reg);
2773 intel_dp->last_backlight_off = jiffies;
2774 edp_wait_backlight_off(intel_dp);
2777 /* Disable backlight PP control and backlight PWM. */
2778 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2780 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2782 if (!intel_dp_is_edp(intel_dp))
2785 DRM_DEBUG_KMS("\n");
2787 _intel_edp_backlight_off(intel_dp);
2788 intel_panel_disable_backlight(old_conn_state);
2792 * Hook for controlling the panel power control backlight through the bl_power
2793 * sysfs attribute. Take care to handle multiple calls.
2795 static void intel_edp_backlight_power(struct intel_connector *connector,
2798 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2799 intel_wakeref_t wakeref;
2803 with_pps_lock(intel_dp, wakeref)
2804 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2805 if (is_enabled == enable)
2808 DRM_DEBUG_KMS("panel power control backlight %s\n",
2809 enable ? "enable" : "disable");
2812 _intel_edp_backlight_on(intel_dp);
2814 _intel_edp_backlight_off(intel_dp);
2817 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2819 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2820 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2821 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2823 I915_STATE_WARN(cur_state != state,
2824 "DP port %c state assertion failure (expected %s, current %s)\n",
2825 port_name(dig_port->base.port),
2826 onoff(state), onoff(cur_state));
2828 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2830 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2832 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2834 I915_STATE_WARN(cur_state != state,
2835 "eDP PLL state assertion failure (expected %s, current %s)\n",
2836 onoff(state), onoff(cur_state));
2838 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2839 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
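/*
 * These asserts read the current hardware state back and complain via
 * I915_STATE_WARN when it doesn't match what the caller expects.
 */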
2841 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2842 const struct intel_crtc_state *pipe_config)
2844 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2845 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2847 assert_pipe_disabled(dev_priv, crtc->pipe);
2848 assert_dp_port_disabled(intel_dp);
2849 assert_edp_pll_disabled(dev_priv);
2851 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2852 pipe_config->port_clock);
2854 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2856 if (pipe_config->port_clock == 162000)
2857 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2859 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2861 I915_WRITE(DP_A, intel_dp->DP);
2866 * [DevILK] Work around required when enabling DP PLL
2867 * while a pipe is enabled going to FDI:
2868 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2869 * 2. Program DP PLL enable
2871 if (IS_GEN(dev_priv, 5))
2872 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2874 intel_dp->DP |= DP_PLL_ENABLE;
2876 I915_WRITE(DP_A, intel_dp->DP);
2881 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2882 const struct intel_crtc_state *old_crtc_state)
2884 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2885 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2887 assert_pipe_disabled(dev_priv, crtc->pipe);
2888 assert_dp_port_disabled(intel_dp);
2889 assert_edp_pll_enabled(dev_priv);
2891 DRM_DEBUG_KMS("disabling eDP PLL\n");
2893 intel_dp->DP &= ~DP_PLL_ENABLE;
2895 I915_WRITE(DP_A, intel_dp->DP);
2900 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2903 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2904 * be capable of signalling downstream hpd with a long pulse.
2905 * Whether or not that means D3 is safe to use is not clear,
2906 * but let's assume so until proven otherwise.
2908 * FIXME should really check all downstream ports...
2910 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2911 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2912 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2915 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2916 const struct intel_crtc_state *crtc_state,
2921 if (!crtc_state->dsc_params.compression_enable)
2924 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2925 enable ? DP_DECOMPRESSION_EN : 0);
2927 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2928 enable ? "enable" : "disable");
2931 /* If the sink supports it, try to set the power state appropriately */
2932 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2936 /* Should have a valid DPCD by this point */
2937 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2940 if (mode != DRM_MODE_DPMS_ON) {
2941 if (downstream_hpd_needs_d0(intel_dp))
2944 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2947 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2950 * When turning on, we need to retry for 1ms to give the sink time to wake up.
2953 for (i = 0; i < 3; i++) {
2954 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2961 if (ret == 1 && lspcon->active)
2962 lspcon_wait_pcon_mode(lspcon);
2966 DRM_DEBUG_KMS("failed to %s sink power state\n",
2967 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2970 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2971 enum port port, enum pipe *pipe)
2975 for_each_pipe(dev_priv, p) {
2976 u32 val = I915_READ(TRANS_DP_CTL(p));
2978 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2984 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2986 /* must initialize pipe to something for the asserts */
2992 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2993 i915_reg_t dp_reg, enum port port,
2999 val = I915_READ(dp_reg);
3001 ret = val & DP_PORT_EN;
3003 /* asserts want to know the pipe even if the port is disabled */
3004 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3005 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3006 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3007 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3008 else if (IS_CHERRYVIEW(dev_priv))
3009 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3011 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3016 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3019 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3020 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3021 intel_wakeref_t wakeref;
3024 wakeref = intel_display_power_get_if_enabled(dev_priv,
3025 encoder->power_domain);
3029 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3030 encoder->port, pipe);
3032 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3037 static void intel_dp_get_config(struct intel_encoder *encoder,
3038 struct intel_crtc_state *pipe_config)
3040 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3041 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3043 enum port port = encoder->port;
3044 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3046 if (encoder->type == INTEL_OUTPUT_EDP)
3047 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3049 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3051 tmp = I915_READ(intel_dp->output_reg);
3053 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3055 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3056 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3058 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3059 flags |= DRM_MODE_FLAG_PHSYNC;
3061 flags |= DRM_MODE_FLAG_NHSYNC;
3063 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3064 flags |= DRM_MODE_FLAG_PVSYNC;
3066 flags |= DRM_MODE_FLAG_NVSYNC;
3068 if (tmp & DP_SYNC_HS_HIGH)
3069 flags |= DRM_MODE_FLAG_PHSYNC;
3071 flags |= DRM_MODE_FLAG_NHSYNC;
3073 if (tmp & DP_SYNC_VS_HIGH)
3074 flags |= DRM_MODE_FLAG_PVSYNC;
3076 flags |= DRM_MODE_FLAG_NVSYNC;
3079 pipe_config->base.adjusted_mode.flags |= flags;
3081 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3082 pipe_config->limited_color_range = true;
3084 pipe_config->lane_count =
3085 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3087 intel_dp_get_m_n(crtc, pipe_config);
3089 if (port == PORT_A) {
3090 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3091 pipe_config->port_clock = 162000;
3093 pipe_config->port_clock = 270000;
3096 pipe_config->base.adjusted_mode.crtc_clock =
3097 intel_dotclock_calculate(pipe_config->port_clock,
3098 &pipe_config->dp_m_n);
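/*
 * The dot clock is recovered from the link M/N ratio and the port clock,
 * so this reflects whatever was actually programmed (e.g. by the BIOS),
 * not our own computation.
 */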
3100 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3101 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3103 * This is a big fat ugly hack.
3105 * Some machines in UEFI boot mode provide us a VBT that has 18
3106 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3107 * unknown we fail to light up. Yet the same BIOS boots up with
3108 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3109 * max, not what it tells us to use.
3111 * Note: This will still be broken if the eDP panel is not lit
3112 * up by the BIOS, and thus we can't get the mode at module
3115 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3116 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3117 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3121 static void intel_disable_dp(struct intel_encoder *encoder,
3122 const struct intel_crtc_state *old_crtc_state,
3123 const struct drm_connector_state *old_conn_state)
3125 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3127 intel_dp->link_trained = false;
3129 if (old_crtc_state->has_audio)
3130 intel_audio_codec_disable(encoder,
3131 old_crtc_state, old_conn_state);
3133 /* Make sure the panel is off before trying to change the mode. But also
3134 * ensure that we have vdd while we switch off the panel. */
3135 intel_edp_panel_vdd_on(intel_dp);
3136 intel_edp_backlight_off(old_conn_state);
3137 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3138 intel_edp_panel_off(intel_dp);
3141 static void g4x_disable_dp(struct intel_encoder *encoder,
3142 const struct intel_crtc_state *old_crtc_state,
3143 const struct drm_connector_state *old_conn_state)
3145 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3148 static void vlv_disable_dp(struct intel_encoder *encoder,
3149 const struct intel_crtc_state *old_crtc_state,
3150 const struct drm_connector_state *old_conn_state)
3152 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3155 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3156 const struct intel_crtc_state *old_crtc_state,
3157 const struct drm_connector_state *old_conn_state)
3159 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3160 enum port port = encoder->port;
3163 * Bspec does not list a specific disable sequence for g4x DP.
3164 * Follow the ilk+ sequence (disable pipe before the port) for
3165 * g4x DP as it does not suffer from underruns like the normal
3166 * g4x modeset sequence (disable pipe after the port).
3168 intel_dp_link_down(encoder, old_crtc_state);
3170 /* Only ilk+ has port A */
3172 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3175 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3176 const struct intel_crtc_state *old_crtc_state,
3177 const struct drm_connector_state *old_conn_state)
3179 intel_dp_link_down(encoder, old_crtc_state);
3182 static void chv_post_disable_dp(struct intel_encoder *encoder,
3183 const struct intel_crtc_state *old_crtc_state,
3184 const struct drm_connector_state *old_conn_state)
3186 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3188 intel_dp_link_down(encoder, old_crtc_state);
3190 mutex_lock(&dev_priv->sb_lock);
3192 /* Assert data lane reset */
3193 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3195 mutex_unlock(&dev_priv->sb_lock);
3199 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3203 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3204 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3205 enum port port = intel_dig_port->base.port;
3206 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
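/*
 * The training pattern mask is wider for DPCD 1.4+ sinks so that TPS4
 * can be expressed; for older sinks it is the plain
 * DP_TRAINING_PATTERN_MASK.
 */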
3208 if (dp_train_pat & train_pat_mask)
3209 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3210 dp_train_pat & train_pat_mask);
3212 if (HAS_DDI(dev_priv)) {
3213 u32 temp = I915_READ(DP_TP_CTL(port));
3215 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3216 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3218 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3220 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3221 switch (dp_train_pat & train_pat_mask) {
3222 case DP_TRAINING_PATTERN_DISABLE:
3223 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3226 case DP_TRAINING_PATTERN_1:
3227 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3229 case DP_TRAINING_PATTERN_2:
3230 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3232 case DP_TRAINING_PATTERN_3:
3233 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3235 case DP_TRAINING_PATTERN_4:
3236 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3239 I915_WRITE(DP_TP_CTL(port), temp);
3241 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3242 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3243 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3245 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3246 case DP_TRAINING_PATTERN_DISABLE:
3247 *DP |= DP_LINK_TRAIN_OFF_CPT;
3249 case DP_TRAINING_PATTERN_1:
3250 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3252 case DP_TRAINING_PATTERN_2:
3253 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3255 case DP_TRAINING_PATTERN_3:
3256 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3257 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3262 *DP &= ~DP_LINK_TRAIN_MASK;
3264 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3265 case DP_TRAINING_PATTERN_DISABLE:
3266 *DP |= DP_LINK_TRAIN_OFF;
3268 case DP_TRAINING_PATTERN_1:
3269 *DP |= DP_LINK_TRAIN_PAT_1;
3271 case DP_TRAINING_PATTERN_2:
3272 *DP |= DP_LINK_TRAIN_PAT_2;
3274 case DP_TRAINING_PATTERN_3:
3275 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3276 *DP |= DP_LINK_TRAIN_PAT_2;
3282 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3283 const struct intel_crtc_state *old_crtc_state)
3285 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3287 /* enable with pattern 1 (as per spec) */
3289 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3292 * Magic for VLV/CHV. We _must_ first set up the register
3293 * without actually enabling the port, and then do another
3294 * write to enable the port. Otherwise link training will
3295 * fail when the power sequencer is freshly used for this port.
3297 intel_dp->DP |= DP_PORT_EN;
3298 if (old_crtc_state->has_audio)
3299 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3301 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3302 POSTING_READ(intel_dp->output_reg);
3305 static void intel_enable_dp(struct intel_encoder *encoder,
3306 const struct intel_crtc_state *pipe_config,
3307 const struct drm_connector_state *conn_state)
3309 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3310 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3311 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3312 u32 dp_reg = I915_READ(intel_dp->output_reg);
3313 enum pipe pipe = crtc->pipe;
3314 intel_wakeref_t wakeref;
3316 if (WARN_ON(dp_reg & DP_PORT_EN))
3319 with_pps_lock(intel_dp, wakeref) {
3320 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3321 vlv_init_panel_power_sequencer(encoder, pipe_config);
3323 intel_dp_enable_port(intel_dp, pipe_config);
3325 edp_panel_vdd_on(intel_dp);
3326 edp_panel_on(intel_dp);
3327 edp_panel_vdd_off(intel_dp, true);
3330 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3331 unsigned int lane_mask = 0x0;
3333 if (IS_CHERRYVIEW(dev_priv))
3334 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3336 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3340 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3341 intel_dp_start_link_train(intel_dp);
3342 intel_dp_stop_link_train(intel_dp);
3344 if (pipe_config->has_audio) {
3345 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3347 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3351 static void g4x_enable_dp(struct intel_encoder *encoder,
3352 const struct intel_crtc_state *pipe_config,
3353 const struct drm_connector_state *conn_state)
3355 intel_enable_dp(encoder, pipe_config, conn_state);
3356 intel_edp_backlight_on(pipe_config, conn_state);
3359 static void vlv_enable_dp(struct intel_encoder *encoder,
3360 const struct intel_crtc_state *pipe_config,
3361 const struct drm_connector_state *conn_state)
3363 intel_edp_backlight_on(pipe_config, conn_state);
3366 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3367 const struct intel_crtc_state *pipe_config,
3368 const struct drm_connector_state *conn_state)
3370 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3371 enum port port = encoder->port;
3373 intel_dp_prepare(encoder, pipe_config);
3375 /* Only ilk+ has port A */
3377 ironlake_edp_pll_on(intel_dp, pipe_config);
3380 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3382 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3383 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3384 enum pipe pipe = intel_dp->pps_pipe;
3385 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3387 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3389 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3392 edp_panel_vdd_off_sync(intel_dp);
3395 * VLV seems to get confused when multiple power sequencers
3396 * have the same port selected (even if only one has power/vdd
3397 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3398 * CHV, on the other hand, doesn't seem to mind having the same port
3399 * selected in multiple power sequencers, but let's clear the
3400 * port select always when logically disconnecting a power sequencer
3403 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3404 pipe_name(pipe), port_name(intel_dig_port->base.port));
3405 I915_WRITE(pp_on_reg, 0);
3406 POSTING_READ(pp_on_reg);
3408 intel_dp->pps_pipe = INVALID_PIPE;
3411 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3414 struct intel_encoder *encoder;
3416 lockdep_assert_held(&dev_priv->pps_mutex);
3418 for_each_intel_dp(&dev_priv->drm, encoder) {
3419 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3420 enum port port = encoder->port;
3422 WARN(intel_dp->active_pipe == pipe,
3423 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3424 pipe_name(pipe), port_name(port));
3426 if (intel_dp->pps_pipe != pipe)
3429 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3430 pipe_name(pipe), port_name(port));
3432 /* make sure vdd is off before we steal it */
3433 vlv_detach_power_sequencer(intel_dp);
3437 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3438 const struct intel_crtc_state *crtc_state)
3440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3441 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3442 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3444 lockdep_assert_held(&dev_priv->pps_mutex);
3446 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3448 if (intel_dp->pps_pipe != INVALID_PIPE &&
3449 intel_dp->pps_pipe != crtc->pipe) {
3451 * If another power sequencer was being used on this
3452 * port previously make sure to turn off vdd there while
3453 * we still have control of it.
3455 vlv_detach_power_sequencer(intel_dp);
3459 * We may be stealing the power
3460 * sequencer from another port.
3462 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3464 intel_dp->active_pipe = crtc->pipe;
3466 if (!intel_dp_is_edp(intel_dp))
3469 /* now it's all ours */
3470 intel_dp->pps_pipe = crtc->pipe;
3472 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3473 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3475 /* init power sequencer on this pipe and port */
3476 intel_dp_init_panel_power_sequencer(intel_dp);
3477 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3480 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3481 const struct intel_crtc_state *pipe_config,
3482 const struct drm_connector_state *conn_state)
3484 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3486 intel_enable_dp(encoder, pipe_config, conn_state);
3489 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3490 const struct intel_crtc_state *pipe_config,
3491 const struct drm_connector_state *conn_state)
3493 intel_dp_prepare(encoder, pipe_config);
3495 vlv_phy_pre_pll_enable(encoder, pipe_config);
3498 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3499 const struct intel_crtc_state *pipe_config,
3500 const struct drm_connector_state *conn_state)
3502 chv_phy_pre_encoder_enable(encoder, pipe_config);
3504 intel_enable_dp(encoder, pipe_config, conn_state);
3506 /* Second common lane will stay alive on its own now */
3507 chv_phy_release_cl2_override(encoder);
3510 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3511 const struct intel_crtc_state *pipe_config,
3512 const struct drm_connector_state *conn_state)
3514 intel_dp_prepare(encoder, pipe_config);
3516 chv_phy_pre_pll_enable(encoder, pipe_config);
3519 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3520 const struct intel_crtc_state *old_crtc_state,
3521 const struct drm_connector_state *old_conn_state)
3523 chv_phy_post_pll_disable(encoder, old_crtc_state);
3527 * Fetch AUX CH registers 0x202 - 0x207 which contain
3528 * link status information
3531 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3533 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3534 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3537 /* These are source-specific values. */
3539 intel_dp_voltage_max(struct intel_dp *intel_dp)
3541 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3542 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3543 enum port port = encoder->port;
3545 if (HAS_DDI(dev_priv))
3546 return intel_ddi_dp_voltage_max(encoder);
3547 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3548 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3549 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3550 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3551 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3552 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3554 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3558 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3560 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3561 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3562 enum port port = encoder->port;
3564 if (HAS_DDI(dev_priv)) {
3565 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3566 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3567 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3569 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3570 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3571 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3572 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3573 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3574 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3576 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3578 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3579 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3580 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3581 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3582 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3583 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3584 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3586 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3589 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3590 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3591 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3592 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3593 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3594 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3595 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3596 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3598 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3603 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3605 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3606 unsigned long demph_reg_value, preemph_reg_value,
3607 uniqtranscale_reg_value;
3608 u8 train_set = intel_dp->train_set[0];
3610 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3611 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3612 preemph_reg_value = 0x0004000;
3613 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3615 demph_reg_value = 0x2B405555;
3616 uniqtranscale_reg_value = 0x552AB83A;
3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3619 demph_reg_value = 0x2B404040;
3620 uniqtranscale_reg_value = 0x5548B83A;
3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3623 demph_reg_value = 0x2B245555;
3624 uniqtranscale_reg_value = 0x5560B83A;
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3627 demph_reg_value = 0x2B405555;
3628 uniqtranscale_reg_value = 0x5598DA3A;
3634 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3635 preemph_reg_value = 0x0002000;
3636 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3638 demph_reg_value = 0x2B404040;
3639 uniqtranscale_reg_value = 0x5552B83A;
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3642 demph_reg_value = 0x2B404848;
3643 uniqtranscale_reg_value = 0x5580B83A;
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3646 demph_reg_value = 0x2B404040;
3647 uniqtranscale_reg_value = 0x55ADDA3A;
3653 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3654 preemph_reg_value = 0x0000000;
3655 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3657 demph_reg_value = 0x2B305555;
3658 uniqtranscale_reg_value = 0x5570B83A;
3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3661 demph_reg_value = 0x2B2B4040;
3662 uniqtranscale_reg_value = 0x55ADDA3A;
3668 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3669 preemph_reg_value = 0x0006000;
3670 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3672 demph_reg_value = 0x1B405555;
3673 uniqtranscale_reg_value = 0x55ADDA3A;
3683 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3684 uniqtranscale_reg_value, 0);
3689 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3691 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3692 u32 deemph_reg_value, margin_reg_value;
3693 bool uniq_trans_scale = false;
3694 u8 train_set = intel_dp->train_set[0];
3696 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3697 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3698 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3699 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3700 deemph_reg_value = 128;
3701 margin_reg_value = 52;
3703 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3704 deemph_reg_value = 128;
3705 margin_reg_value = 77;
3707 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3708 deemph_reg_value = 128;
3709 margin_reg_value = 102;
3711 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3712 deemph_reg_value = 128;
3713 margin_reg_value = 154;
3714 uniq_trans_scale = true;
3720 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3721 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3722 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3723 deemph_reg_value = 85;
3724 margin_reg_value = 78;
3726 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3727 deemph_reg_value = 85;
3728 margin_reg_value = 116;
3730 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3731 deemph_reg_value = 85;
3732 margin_reg_value = 154;
3738 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3739 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3740 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3741 deemph_reg_value = 64;
3742 margin_reg_value = 104;
3744 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3745 deemph_reg_value = 64;
3746 margin_reg_value = 154;
3752 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3753 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3754 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3755 deemph_reg_value = 43;
3756 margin_reg_value = 154;
3766 chv_set_phy_signal_level(encoder, deemph_reg_value,
3767 margin_reg_value, uniq_trans_scale);
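/* Translate the requested swing/pre-emphasis into g4x-style DP port register bits */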
3773 g4x_signal_levels(u8 train_set)
3775 u32 signal_levels = 0;
3777 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3778 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3780 signal_levels |= DP_VOLTAGE_0_4;
3782 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3783 signal_levels |= DP_VOLTAGE_0_6;
3785 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3786 signal_levels |= DP_VOLTAGE_0_8;
3788 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3789 signal_levels |= DP_VOLTAGE_1_2;
3792 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3793 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3795 signal_levels |= DP_PRE_EMPHASIS_0;
3797 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3798 signal_levels |= DP_PRE_EMPHASIS_3_5;
3800 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3801 signal_levels |= DP_PRE_EMPHASIS_6;
3803 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3804 signal_levels |= DP_PRE_EMPHASIS_9_5;
3807 return signal_levels;
3810 /* SNB CPU eDP voltage swing and pre-emphasis control */
3812 snb_cpu_edp_signal_levels(u8 train_set)
3814 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3815 DP_TRAIN_PRE_EMPHASIS_MASK);
3816 switch (signal_levels) {
3817 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3818 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3819 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3820 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3821 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3822 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3823 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3824 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3825 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3826 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3827 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3828 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3829 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3830 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3832 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3833 "0x%x\n", signal_levels);
3834 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3838 /* IVB CPU eDP voltage swing and pre-emphasis control */
3840 ivb_cpu_edp_signal_levels(u8 train_set)
3842 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3843 DP_TRAIN_PRE_EMPHASIS_MASK);
3844 switch (signal_levels) {
3845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3846 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3847 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3848 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3850 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3853 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3855 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3858 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3860 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3863 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3864 "0x%x\n", signal_levels);
3865 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3870 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3873 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3874 enum port port = intel_dig_port->base.port;
3875 u32 signal_levels, mask = 0;
3876 u8 train_set = intel_dp->train_set[0];
3878 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3879 signal_levels = bxt_signal_levels(intel_dp);
3880 } else if (HAS_DDI(dev_priv)) {
3881 signal_levels = ddi_signal_levels(intel_dp);
3882 mask = DDI_BUF_EMP_MASK;
3883 } else if (IS_CHERRYVIEW(dev_priv)) {
3884 signal_levels = chv_signal_levels(intel_dp);
3885 } else if (IS_VALLEYVIEW(dev_priv)) {
3886 signal_levels = vlv_signal_levels(intel_dp);
3887 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3888 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3889 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3890 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3891 signal_levels = snb_cpu_edp_signal_levels(train_set);
3892 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3894 signal_levels = g4x_signal_levels(train_set);
3895 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3899 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3901 DRM_DEBUG_KMS("Using vswing level %d\n",
3902 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3903 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3904 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3905 DP_TRAIN_PRE_EMPHASIS_SHIFT);
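/*
 * On platforms whose levels are programmed through a separate PHY
 * (VLV/CHV/bxt) mask stays 0, so the port register update below is
 * effectively a no-op for the signal levels.
 */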
3907 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3909 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3910 POSTING_READ(intel_dp->output_reg);
3914 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3917 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3918 struct drm_i915_private *dev_priv =
3919 to_i915(intel_dig_port->base.base.dev);
3921 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3923 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3924 POSTING_READ(intel_dp->output_reg);
3927 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3929 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3930 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3931 enum port port = intel_dig_port->base.port;
3934 if (!HAS_DDI(dev_priv))
3937 val = I915_READ(DP_TP_CTL(port));
3938 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3939 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3940 I915_WRITE(DP_TP_CTL(port), val);
3943 * On PORT_A we can have only eDP in SST mode. There the only reason
3944 * we need to set idle transmission mode is to work around a HW issue
3945 * where we enable the pipe while not in idle link-training mode.
3946 * In this case there is a requirement to wait for a minimum number of
3947 * idle patterns to be sent.
3952 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3953 DP_TP_STATUS_IDLE_DONE,
3954 DP_TP_STATUS_IDLE_DONE,
3956 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3960 intel_dp_link_down(struct intel_encoder *encoder,
3961 const struct intel_crtc_state *old_crtc_state)
3963 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3964 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3965 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3966 enum port port = encoder->port;
3967 u32 DP = intel_dp->DP;
3969 if (WARN_ON(HAS_DDI(dev_priv)))
3972 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3975 DRM_DEBUG_KMS("\n");
3977 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3978 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3979 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3980 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3982 DP &= ~DP_LINK_TRAIN_MASK;
3983 DP |= DP_LINK_TRAIN_PAT_IDLE;
3985 I915_WRITE(intel_dp->output_reg, DP);
3986 POSTING_READ(intel_dp->output_reg);
3988 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3989 I915_WRITE(intel_dp->output_reg, DP);
3990 POSTING_READ(intel_dp->output_reg);
3993 * HW workaround for IBX, we need to move the port
3994 * to transcoder A after disabling it to allow the
3995 * matching HDMI port to be enabled on transcoder A.
3997 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3999 * We get CPU/PCH FIFO underruns on the other pipe when
4000 * doing the workaround. Sweep them under the rug.
4002 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4003 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4005 /* always enable with pattern 1 (as per spec) */
4006 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4007 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4008 DP_LINK_TRAIN_PAT_1;
4009 I915_WRITE(intel_dp->output_reg, DP);
4010 POSTING_READ(intel_dp->output_reg);
4013 I915_WRITE(intel_dp->output_reg, DP);
4014 POSTING_READ(intel_dp->output_reg);
4016 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4017 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4018 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4021 msleep(intel_dp->panel_power_down_delay);
4025 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4026 intel_wakeref_t wakeref;
4028 with_pps_lock(intel_dp, wakeref)
4029 intel_dp->active_pipe = INVALID_PIPE;
4034 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4039 * Prior to DP1.3 the bit represented by
4040 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4041 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4042 * the true capability of the panel. The only way to check is to
4043 * then compare 0000h and 2200h.
4045 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4046 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4049 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4050 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4051 DRM_ERROR("DPCD failed read at extended capabilities\n");
4055 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4056 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4060 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4063 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4064 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4066 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4070 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4072 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4073 sizeof(intel_dp->dpcd)) < 0)
4074 return false; /* aux transfer failed */
4076 intel_dp_extended_receiver_capabilities(intel_dp);
4078 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4080 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4083 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4086 * Clear the cached register set to avoid using stale values
4087 * for the sinks that do not support DSC.
4089 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4091 /* Clear fec_capable to avoid using stale values */
4092 intel_dp->fec_capable = 0;
4094 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4095 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4096 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4097 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4099 sizeof(intel_dp->dsc_dpcd)) < 0)
4100 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4103 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4104 (int)sizeof(intel_dp->dsc_dpcd),
4105 intel_dp->dsc_dpcd);
4107 /* FEC is supported only on DP 1.4 */
4108 if (!intel_dp_is_edp(intel_dp) &&
4109 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4110 &intel_dp->fec_capable) < 0)
4111 DRM_ERROR("Failed to read FEC DPCD register\n");
4113 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4118 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4120 struct drm_i915_private *dev_priv =
4121 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4123 /* this function is meant to be called only once */
4124 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4126 if (!intel_dp_read_dpcd(intel_dp))
4129 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4130 drm_dp_is_branch(intel_dp->dpcd));
4132 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4133 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4134 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4137 * Read the eDP display control registers.
4139 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4140 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4141 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4142 * method). The display control registers should read zero if they're
4143 * not supported anyway.
4145 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4146 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4147 sizeof(intel_dp->edp_dpcd))
4148 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4149 intel_dp->edp_dpcd);
4152 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4153 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4155 intel_psr_init_dpcd(intel_dp);
4157 /* Read the eDP 1.4+ supported link rates. */
4158 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4159 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4162 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4163 sink_rates, sizeof(sink_rates));
4165 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4166 int val = le16_to_cpu(sink_rates[i]);
4171 /* Value read multiplied by 200kHz gives the per-lane
4172 * link rate in kHz. The source rates are, however,
4173 * stored in terms of LS_Clk kHz. The full conversion
4174 * back to symbols is
4175 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4177 intel_dp->sink_rates[i] = (val * 200) / 10;
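/*
 * Worked example (illustrative): a DP_SUPPORTED_LINK_RATES entry of 8100
 * corresponds to 8100 * 200 kHz = 1.62 GHz on the wire, and is stored here
 * as (8100 * 200) / 10 = 162000, i.e. the 162 MHz link symbol clock in kHz
 * that the rest of the driver works with.
 */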
4179 intel_dp->num_sink_rates = i;
4183 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4184 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4186 if (intel_dp->num_sink_rates)
4187 intel_dp->use_rate_select = true;
4189 intel_dp_set_sink_rates(intel_dp);
4191 intel_dp_set_common_rates(intel_dp);
4193 /* Read the eDP DSC DPCD registers */
4194 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4195 intel_dp_get_dsc_sink_cap(intel_dp);
4202 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4204 if (!intel_dp_read_dpcd(intel_dp))
4207 /* Don't clobber cached eDP rates. */
4208 if (!intel_dp_is_edp(intel_dp)) {
4209 intel_dp_set_sink_rates(intel_dp);
4210 intel_dp_set_common_rates(intel_dp);
4214 * Some eDP panels do not set a valid value for sink count, that is why
4215 * we don't bother reading it here or in intel_edp_init_dpcd().
4217 if (!intel_dp_is_edp(intel_dp)) {
4221 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4226 * Sink count can change between short pulse HPD interrupts, hence
4227 * a member variable in intel_dp tracks any changes
4228 * between short pulse interrupts.
4230 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4233 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4234 * a dongle is present but no display. Unless we need to know
4235 * whether a dongle is present, we don't need to update
4236 * downstream port information. So an early return here saves
4237 * time by skipping other operations which are not required.
4239 if (!intel_dp->sink_count)
4243 if (!drm_dp_is_branch(intel_dp->dpcd))
4244 return true; /* native DP sink */
4246 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4247 return true; /* no per-port downstream info */
4249 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4250 intel_dp->downstream_ports,
4251 DP_MAX_DOWNSTREAM_PORTS) < 0)
4252 return false; /* downstream port status fetch failed */
4258 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4262 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4265 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4268 return mstm_cap & DP_MST_CAP;
4272 intel_dp_can_mst(struct intel_dp *intel_dp)
4274 return i915_modparams.enable_dp_mst &&
4275 intel_dp->can_mst &&
4276 intel_dp_sink_can_mst(intel_dp);
4280 intel_dp_configure_mst(struct intel_dp *intel_dp)
4282 struct intel_encoder *encoder =
4283 &dp_to_dig_port(intel_dp)->base;
4284 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4286 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4287 port_name(encoder->port), yesno(intel_dp->can_mst),
4288 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4290 if (!intel_dp->can_mst)
4293 intel_dp->is_mst = sink_can_mst &&
4294 i915_modparams.enable_dp_mst;
4296 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4301 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4303 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4304 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4308 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4309 int mode_clock, int mode_hdisplay)
4311 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4315 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4316 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4317 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4318 * for MST -> TimeSlotsPerMTP has to be calculated
4320 bits_per_pixel = (link_clock * lane_count * 8 *
4321 DP_DSC_FEC_OVERHEAD_FACTOR) /
4324 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4325 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4329 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4330 * check, output bpp from small joiner RAM check)
4332 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
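/*
 * Rough worked example (illustrative numbers only): 4 lanes at HBR2 have a
 * 540000 kHz link symbol clock, so the usable bandwidth is about
 * 4 * 540000 * 8 * 0.976 ~= 16.9 Gbit/s; with a ~533 MHz pixel clock the
 * link-bandwidth limit works out to roughly 31 bpp, while a 3840-wide mode
 * limits the small joiner bpp to 61440 / 3840 = 16, and the min() above
 * keeps the smaller of the two.
 */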
4334 /* Error out if the max bpp is less than smallest allowed valid bpp */
4335 if (bits_per_pixel < valid_dsc_bpp[0]) {
4336 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4340 /* Find the nearest match in the array of known BPPs from VESA */
4341 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4342 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4345 bits_per_pixel = valid_dsc_bpp[i];
4348 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
4349 * the fractional part is 0
4351 return bits_per_pixel << 4;
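/*
 * E.g. (illustrative): a compressed bpp of 12 is returned as 12 << 4 = 192,
 * i.e. 12.0 in U6.4 fixed point, with a zero fractional part in the low
 * 4 bits as noted above.
 */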
4354 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4358 u8 min_slice_count, i;
4359 int max_slice_width;
4361 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4362 min_slice_count = DIV_ROUND_UP(mode_clock,
4363 DP_DSC_MAX_ENC_THROUGHPUT_0);
4365 min_slice_count = DIV_ROUND_UP(mode_clock,
4366 DP_DSC_MAX_ENC_THROUGHPUT_1);
4368 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4369 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4370 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4374 /* Also take into account max slice width */
4375 min_slice_count = min_t(u8, min_slice_count,
4376 DIV_ROUND_UP(mode_hdisplay,
4379 /* Find the closest match to the valid slice count values */
4380 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4381 if (valid_dsc_slicecount[i] >
4382 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4385 if (min_slice_count <= valid_dsc_slicecount[i])
4386 return valid_dsc_slicecount[i];
4389 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4393 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4397 u8 test_lane_count, test_link_bw;
4401 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4402 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4406 DRM_DEBUG_KMS("Lane count read failed\n");
4409 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4411 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4414 DRM_DEBUG_KMS("Link Rate read failed\n");
4417 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4419 /* Validate the requested link rate and lane count */
4420 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4424 intel_dp->compliance.test_lane_count = test_lane_count;
4425 intel_dp->compliance.test_link_rate = test_link_rate;
4430 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4434 __be16 h_width, v_height;
4437 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4438 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4441 DRM_DEBUG_KMS("Test pattern read failed\n");
4444 if (test_pattern != DP_COLOR_RAMP)
4447 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4450 DRM_DEBUG_KMS("H Width read failed\n");
4454 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4457 DRM_DEBUG_KMS("V Height read failed\n");
4461 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4464 DRM_DEBUG_KMS("TEST MISC read failed\n");
4467 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4469 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4471 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4472 case DP_TEST_BIT_DEPTH_6:
4473 intel_dp->compliance.test_data.bpc = 6;
4475 case DP_TEST_BIT_DEPTH_8:
4476 intel_dp->compliance.test_data.bpc = 8;
4482 intel_dp->compliance.test_data.video_pattern = test_pattern;
4483 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4484 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4485 /* Set test active flag here so userspace doesn't interrupt things */
4486 intel_dp->compliance.test_active = 1;
4491 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4493 u8 test_result = DP_TEST_ACK;
4494 struct intel_connector *intel_connector = intel_dp->attached_connector;
4495 struct drm_connector *connector = &intel_connector->base;
4497 if (intel_connector->detect_edid == NULL ||
4498 connector->edid_corrupt ||
4499 intel_dp->aux.i2c_defer_count > 6) {
4500 /* Check EDID read for NACKs, DEFERs and corruption
4501 * (DP CTS 1.2 Core r1.1)
4502 * 4.2.2.4 : Failed EDID read, I2C_NAK
4503 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4504 * 4.2.2.6 : EDID corruption detected
4505 * Use failsafe mode for all cases
4507 if (intel_dp->aux.i2c_nack_count > 0 ||
4508 intel_dp->aux.i2c_defer_count > 0)
4509 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4510 intel_dp->aux.i2c_nack_count,
4511 intel_dp->aux.i2c_defer_count);
4512 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4514 struct edid *block = intel_connector->detect_edid;
4516 /* We have to write the checksum
4517 * of the last block read
4519 block += intel_connector->detect_edid->extensions;
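/*
 * E.g. (illustrative): for an EDID with one extension block, 'extensions'
 * is 1, so 'block' advances by one 128-byte struct edid and the checksum
 * written below is that of the last (extension) block, as the test expects.
 */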
4521 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4522 block->checksum) <= 0)
4523 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4525 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4526 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4529 /* Set test active flag here so userspace doesn't interrupt things */
4530 intel_dp->compliance.test_active = 1;
4535 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4537 u8 test_result = DP_TEST_NAK;
4541 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4543 u8 response = DP_TEST_NAK;
4547 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4549 DRM_DEBUG_KMS("Could not read test request from sink\n");
4554 case DP_TEST_LINK_TRAINING:
4555 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4556 response = intel_dp_autotest_link_training(intel_dp);
4558 case DP_TEST_LINK_VIDEO_PATTERN:
4559 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4560 response = intel_dp_autotest_video_pattern(intel_dp);
4562 case DP_TEST_LINK_EDID_READ:
4563 DRM_DEBUG_KMS("EDID test requested\n");
4564 response = intel_dp_autotest_edid(intel_dp);
4566 case DP_TEST_LINK_PHY_TEST_PATTERN:
4567 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4568 response = intel_dp_autotest_phy_pattern(intel_dp);
4571 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4575 if (response & DP_TEST_ACK)
4576 intel_dp->compliance.test_type = request;
4579 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4581 DRM_DEBUG_KMS("Could not write test response to sink\n");
4585 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4589 if (intel_dp->is_mst) {
4590 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4595 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4596 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4600 /* check link status - esi[10] = 0x200c */
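/*
 * (Aside, for illustration: the ESI block is read starting at
 * DP_SINK_COUNT_ESI = 0x2002, so &esi[10] lines up with DPCD 0x200c,
 * the ESI copy of the lane status registers that
 * drm_dp_channel_eq_ok() inspects.)
 */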
4601 if (intel_dp->active_mst_links > 0 &&
4602 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4603 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4604 intel_dp_start_link_train(intel_dp);
4605 intel_dp_stop_link_train(intel_dp);
4608 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4609 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4612 for (retry = 0; retry < 3; retry++) {
4614 wret = drm_dp_dpcd_write(&intel_dp->aux,
4615 DP_SINK_COUNT_ESI+1,
4622 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4624 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4632 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4633 intel_dp->is_mst = false;
4634 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4642 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4644 u8 link_status[DP_LINK_STATUS_SIZE];
4646 if (!intel_dp->link_trained)
4650 * While PSR source HW is enabled it controls the main link, sending
4651 * frames and enabling/disabling it, so trying to do a retrain will fail
4652 * as the link may or may not be on, or it could mix training patterns
4653 * and frame data at the same time, causing the retrain to fail.
4654 * Also, when exiting PSR, the HW will retrain the link anyway, fixing
4655 * any link status error.
4657 if (intel_psr_enabled(intel_dp))
4660 if (!intel_dp_get_link_status(intel_dp, link_status))
4664 * Validate the cached values of intel_dp->link_rate and
4665 * intel_dp->lane_count before attempting to retrain.
4667 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4668 intel_dp->lane_count))
4671 /* Retrain if Channel EQ or CR not ok */
4672 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4675 int intel_dp_retrain_link(struct intel_encoder *encoder,
4676 struct drm_modeset_acquire_ctx *ctx)
4678 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4679 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4680 struct intel_connector *connector = intel_dp->attached_connector;
4681 struct drm_connector_state *conn_state;
4682 struct intel_crtc_state *crtc_state;
4683 struct intel_crtc *crtc;
4686 /* FIXME handle the MST connectors as well */
4688 if (!connector || connector->base.status != connector_status_connected)
4691 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4696 conn_state = connector->base.state;
4698 crtc = to_intel_crtc(conn_state->crtc);
4702 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4706 crtc_state = to_intel_crtc_state(crtc->base.state);
4708 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4710 if (!crtc_state->base.active)
4713 if (conn_state->commit &&
4714 !try_wait_for_completion(&conn_state->commit->hw_done))
4717 if (!intel_dp_needs_link_retrain(intel_dp))
4720 /* Suppress underruns caused by re-training */
4721 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4722 if (crtc_state->has_pch_encoder)
4723 intel_set_pch_fifo_underrun_reporting(dev_priv,
4724 intel_crtc_pch_transcoder(crtc), false);
4726 intel_dp_start_link_train(intel_dp);
4727 intel_dp_stop_link_train(intel_dp);
4729 /* Keep underrun reporting disabled until things are stable */
4730 intel_wait_for_vblank(dev_priv, crtc->pipe);
4732 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4733 if (crtc_state->has_pch_encoder)
4734 intel_set_pch_fifo_underrun_reporting(dev_priv,
4735 intel_crtc_pch_transcoder(crtc), true);
4741 * If the display is now connected, check link status;
4742 * there have been known issues of link loss triggering
4745 * Some sinks (e.g. ASUS PB287Q) seem to perform some
4746 * weird HPD ping pong during modesets. So we can apparently
4747 * end up with HPD going low during a modeset, and then
4748 * going back up soon after. And once that happens we must
4749 * retrain the link to get a picture. That's in case no
4750 * userspace component reacted to intermittent HPD dip.
4752 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4753 struct intel_connector *connector)
4755 struct drm_modeset_acquire_ctx ctx;
4759 changed = intel_encoder_hotplug(encoder, connector);
4761 drm_modeset_acquire_init(&ctx, 0);
4764 ret = intel_dp_retrain_link(encoder, &ctx);
4766 if (ret == -EDEADLK) {
4767 drm_modeset_backoff(&ctx);
4774 drm_modeset_drop_locks(&ctx);
4775 drm_modeset_acquire_fini(&ctx);
4776 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4781 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4785 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4788 if (drm_dp_dpcd_readb(&intel_dp->aux,
4789 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4792 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4794 if (val & DP_AUTOMATED_TEST_REQUEST)
4795 intel_dp_handle_test_request(intel_dp);
4797 if (val & DP_CP_IRQ)
4798 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4800 if (val & DP_SINK_SPECIFIC_IRQ)
4801 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4805 * According to DP spec
4808 * 2. Configure link according to Receiver Capabilities
4809 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4810 * 4. Check link status on receipt of hot-plug interrupt
4812 * intel_dp_short_pulse - handles short pulse interrupts
4813 * when full detection is not required.
4814 * Returns %true if the short pulse was handled and full detection
4815 * is NOT required, and %false otherwise.
4818 intel_dp_short_pulse(struct intel_dp *intel_dp)
4820 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4821 u8 old_sink_count = intel_dp->sink_count;
4825 * Clearing compliance test variables to allow capturing
4826 * of values for next automated test request.
4828 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4831 * Now read the DPCD to see if it's actually running.
4832 * If the current value of sink count doesn't match
4833 * the value that was stored earlier, or the DPCD read failed,
4834 * we need to do full detection.
4836 ret = intel_dp_get_dpcd(intel_dp);
4838 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4839 /* No need to proceed if we are going to do full detect */
4843 intel_dp_check_service_irq(intel_dp);
4845 /* Handle CEC interrupts, if any */
4846 drm_dp_cec_irq(&intel_dp->aux);
4848 /* defer to the hotplug work for link retraining if needed */
4849 if (intel_dp_needs_link_retrain(intel_dp))
4852 intel_psr_short_pulse(intel_dp);
4854 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4855 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4856 /* Send a Hotplug Uevent to userspace to start modeset */
4857 drm_kms_helper_hotplug_event(&dev_priv->drm);
4863 /* XXX this is probably wrong for multiple downstream ports */
4864 static enum drm_connector_status
4865 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4867 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4868 u8 *dpcd = intel_dp->dpcd;
4872 lspcon_resume(lspcon);
4874 if (!intel_dp_get_dpcd(intel_dp))
4875 return connector_status_disconnected;
4877 if (intel_dp_is_edp(intel_dp))
4878 return connector_status_connected;
4880 /* if there's no downstream port, we're done */
4881 if (!drm_dp_is_branch(dpcd))
4882 return connector_status_connected;
4884 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4885 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4886 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4888 return intel_dp->sink_count ?
4889 connector_status_connected : connector_status_disconnected;
4892 if (intel_dp_can_mst(intel_dp))
4893 return connector_status_connected;
4895 /* If no HPD, poke DDC gently */
4896 if (drm_probe_ddc(&intel_dp->aux.ddc))
4897 return connector_status_connected;
4899 /* Well we tried, say unknown for unreliable port types */
4900 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4901 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4902 if (type == DP_DS_PORT_TYPE_VGA ||
4903 type == DP_DS_PORT_TYPE_NON_EDID)
4904 return connector_status_unknown;
4906 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4907 DP_DWN_STRM_PORT_TYPE_MASK;
4908 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4909 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4910 return connector_status_unknown;
4913 /* Anything else is out of spec, warn and ignore */
4914 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4915 return connector_status_disconnected;
4918 static enum drm_connector_status
4919 edp_detect(struct intel_dp *intel_dp)
4921 return connector_status_connected;
4924 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4926 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4929 switch (encoder->hpd_pin) {
4931 bit = SDE_PORTB_HOTPLUG;
4934 bit = SDE_PORTC_HOTPLUG;
4937 bit = SDE_PORTD_HOTPLUG;
4940 MISSING_CASE(encoder->hpd_pin);
4944 return I915_READ(SDEISR) & bit;
4947 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
4949 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4952 switch (encoder->hpd_pin) {
4954 bit = SDE_PORTB_HOTPLUG_CPT;
4957 bit = SDE_PORTC_HOTPLUG_CPT;
4960 bit = SDE_PORTD_HOTPLUG_CPT;
4963 MISSING_CASE(encoder->hpd_pin);
4967 return I915_READ(SDEISR) & bit;
4970 static bool spt_digital_port_connected(struct intel_encoder *encoder)
4972 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4975 switch (encoder->hpd_pin) {
4977 bit = SDE_PORTA_HOTPLUG_SPT;
4980 bit = SDE_PORTE_HOTPLUG_SPT;
4983 return cpt_digital_port_connected(encoder);
4986 return I915_READ(SDEISR) & bit;
4989 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4991 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4994 switch (encoder->hpd_pin) {
4996 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4999 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5002 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5005 MISSING_CASE(encoder->hpd_pin);
5009 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5012 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5014 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5017 switch (encoder->hpd_pin) {
5019 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5022 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5025 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5028 MISSING_CASE(encoder->hpd_pin);
5032 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5035 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5037 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5039 if (encoder->hpd_pin == HPD_PORT_A)
5040 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5042 return ibx_digital_port_connected(encoder);
5045 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5047 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5049 if (encoder->hpd_pin == HPD_PORT_A)
5050 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5052 return cpt_digital_port_connected(encoder);
5055 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5057 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5059 if (encoder->hpd_pin == HPD_PORT_A)
5060 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5062 return cpt_digital_port_connected(encoder);
5065 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5067 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5069 if (encoder->hpd_pin == HPD_PORT_A)
5070 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5072 return cpt_digital_port_connected(encoder);
5075 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5077 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5080 switch (encoder->hpd_pin) {
5082 bit = BXT_DE_PORT_HP_DDIA;
5085 bit = BXT_DE_PORT_HP_DDIB;
5088 bit = BXT_DE_PORT_HP_DDIC;
5091 MISSING_CASE(encoder->hpd_pin);
5095 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5098 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5099 struct intel_digital_port *intel_dig_port)
5101 enum port port = intel_dig_port->base.port;
5103 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5106 static const char *tc_type_name(enum tc_port_type type)
5108 static const char * const names[] = {
5109 [TC_PORT_UNKNOWN] = "unknown",
5110 [TC_PORT_LEGACY] = "legacy",
5111 [TC_PORT_TYPEC] = "typec",
5112 [TC_PORT_TBT] = "tbt",
5115 if (WARN_ON(type >= ARRAY_SIZE(names)))
5116 type = TC_PORT_UNKNOWN;
5121 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5122 struct intel_digital_port *intel_dig_port,
5123 bool is_legacy, bool is_typec, bool is_tbt)
5125 enum port port = intel_dig_port->base.port;
5126 enum tc_port_type old_type = intel_dig_port->tc_type;
5128 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5131 intel_dig_port->tc_type = TC_PORT_LEGACY;
5133 intel_dig_port->tc_type = TC_PORT_TYPEC;
5135 intel_dig_port->tc_type = TC_PORT_TBT;
5139 /* Types are not supposed to be changed at runtime. */
5140 WARN_ON(old_type != TC_PORT_UNKNOWN &&
5141 old_type != intel_dig_port->tc_type);
5143 if (old_type != intel_dig_port->tc_type)
5144 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5145 tc_type_name(intel_dig_port->tc_type));
5149 * This function implements the first part of the Connect Flow described by our
5150 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5151 * lanes, EDID, etc) is done as needed in the typical places.
5153 * Unlike the other ports, type-C ports are not available to use as soon as we
5154 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5155 * display, USB, etc. As a result, handshaking through FIA is required around
5156 * connect and disconnect to cleanly transfer ownership with the controller and
5157 * set the type-C power state.
5159 * We could opt to only do the connect flow when we actually try to use the AUX
5160 * channels or do a modeset, then immediately run the disconnect flow after
5161 * usage, but there are some implications on this for a dynamic environment:
5162 * things may go away or change behind our backs. So for now our driver is
5163 * always trying to acquire ownership of the controller as soon as it gets an
5164 * interrupt (or polls state and sees a port is connected) and only gives it
5165 * back when it sees a disconnect. Implementation of a more fine-grained model
5166 * will require a lot of coordination with user space and thorough testing for
5167 * the extra possible cases.
5169 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5170 struct intel_digital_port *dig_port)
5172 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5175 if (dig_port->tc_type != TC_PORT_LEGACY &&
5176 dig_port->tc_type != TC_PORT_TYPEC)
5179 val = I915_READ(PORT_TX_DFLEXDPPMS);
5180 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5181 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5182 WARN_ON(dig_port->tc_legacy_port);
5187 * This function may be called many times in a row without an HPD event
5188 * in between, so try to avoid the write when we can.
5190 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5191 if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5192 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5193 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5197 * Now we have to re-check the live state, in case the port recently
5198 * became disconnected. Not necessary for legacy mode.
5200 if (dig_port->tc_type == TC_PORT_TYPEC &&
5201 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5202 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5203 icl_tc_phy_disconnect(dev_priv, dig_port);
5211 * See the comment at the connect function. This implements the Disconnect
5214 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5215 struct intel_digital_port *dig_port)
5217 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5219 if (dig_port->tc_type == TC_PORT_UNKNOWN)
5223 * The TBT disconnection flow is just reading the live status, which was done in
5226 if (dig_port->tc_type == TC_PORT_TYPEC ||
5227 dig_port->tc_type == TC_PORT_LEGACY) {
5230 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5231 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5232 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5235 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5236 port_name(dig_port->base.port),
5237 tc_type_name(dig_port->tc_type));
5239 dig_port->tc_type = TC_PORT_UNKNOWN;
5243 * The type-C ports are different because even when they are connected, they may
5244 * not be available/usable by the graphics driver: see the comment on
5245 * icl_tc_phy_connect(). So in our driver, instead of adding the additional
5246 * concept of "usable" and making everything check for "connected and usable", we
5247 * define a port as "connected" when it is not only connected, but also when it
5248 * is usable by the rest of the driver. That maintains the old assumption that
5249 * connected ports are usable, and avoids exposing to the users objects they
5252 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5253 struct intel_digital_port *intel_dig_port)
5255 enum port port = intel_dig_port->base.port;
5256 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5257 bool is_legacy, is_typec, is_tbt;
5261 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5262 * legacy. Treat the port as legacy from now on.
5264 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5265 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5266 intel_dig_port->tc_legacy_port = true;
5267 is_legacy = intel_dig_port->tc_legacy_port;
5270 * The spec says we shouldn't be using the ISR bits for distinguishing
5271 * between TC and TBT. We should use DFLEXDPSP.
5273 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5274 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5275 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5277 if (!is_legacy && !is_typec && !is_tbt) {
5278 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5283 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5286 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5292 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5294 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5295 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5297 if (intel_port_is_combophy(dev_priv, encoder->port))
5298 return icl_combo_port_connected(dev_priv, dig_port);
5299 else if (intel_port_is_tc(dev_priv, encoder->port))
5300 return icl_tc_port_connected(dev_priv, dig_port);
5302 MISSING_CASE(encoder->hpd_pin);
5308 * intel_digital_port_connected - is the specified port connected?
5309 * @encoder: intel_encoder
5311 * In cases where there's a connector physically connected but it can't be used
5312 * by our hardware we also return false, since the rest of the driver should
5313 * pretty much treat the port as disconnected. This is relevant for type-C
5314 * (starting on ICL) where there's ownership involved.
5316 * Return %true if port is connected, %false otherwise.
5318 bool intel_digital_port_connected(struct intel_encoder *encoder)
5320 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5322 if (HAS_GMCH(dev_priv)) {
5323 if (IS_GM45(dev_priv))
5324 return gm45_digital_port_connected(encoder);
5326 return g4x_digital_port_connected(encoder);
5329 if (INTEL_GEN(dev_priv) >= 11)
5330 return icl_digital_port_connected(encoder);
5331 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5332 return spt_digital_port_connected(encoder);
5333 else if (IS_GEN9_LP(dev_priv))
5334 return bxt_digital_port_connected(encoder);
5335 else if (IS_GEN(dev_priv, 8))
5336 return bdw_digital_port_connected(encoder);
5337 else if (IS_GEN(dev_priv, 7))
5338 return ivb_digital_port_connected(encoder);
5339 else if (IS_GEN(dev_priv, 6))
5340 return snb_digital_port_connected(encoder);
5341 else if (IS_GEN(dev_priv, 5))
5342 return ilk_digital_port_connected(encoder);
5344 MISSING_CASE(INTEL_GEN(dev_priv));
5348 static struct edid *
5349 intel_dp_get_edid(struct intel_dp *intel_dp)
5351 struct intel_connector *intel_connector = intel_dp->attached_connector;
5353 /* use cached edid if we have one */
5354 if (intel_connector->edid) {
5356 if (IS_ERR(intel_connector->edid))
5359 return drm_edid_duplicate(intel_connector->edid);
5361 return drm_get_edid(&intel_connector->base,
5362 &intel_dp->aux.ddc);
5366 intel_dp_set_edid(struct intel_dp *intel_dp)
5368 struct intel_connector *intel_connector = intel_dp->attached_connector;
5371 intel_dp_unset_edid(intel_dp);
5372 edid = intel_dp_get_edid(intel_dp);
5373 intel_connector->detect_edid = edid;
5375 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5376 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5380 intel_dp_unset_edid(struct intel_dp *intel_dp)
5382 struct intel_connector *intel_connector = intel_dp->attached_connector;
5384 drm_dp_cec_unset_edid(&intel_dp->aux);
5385 kfree(intel_connector->detect_edid);
5386 intel_connector->detect_edid = NULL;
5388 intel_dp->has_audio = false;
5392 intel_dp_detect(struct drm_connector *connector,
5393 struct drm_modeset_acquire_ctx *ctx,
5396 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5397 struct intel_dp *intel_dp = intel_attached_dp(connector);
5398 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5399 struct intel_encoder *encoder = &dig_port->base;
5400 enum drm_connector_status status;
5401 enum intel_display_power_domain aux_domain =
5402 intel_aux_power_domain(dig_port);
5403 intel_wakeref_t wakeref;
5405 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5406 connector->base.id, connector->name);
5407 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5409 wakeref = intel_display_power_get(dev_priv, aux_domain);
5411 /* Can't disconnect eDP */
5412 if (intel_dp_is_edp(intel_dp))
5413 status = edp_detect(intel_dp);
5414 else if (intel_digital_port_connected(encoder))
5415 status = intel_dp_detect_dpcd(intel_dp);
5417 status = connector_status_disconnected;
5419 if (status == connector_status_disconnected) {
5420 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5421 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5423 if (intel_dp->is_mst) {
5424 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5426 intel_dp->mst_mgr.mst_state);
5427 intel_dp->is_mst = false;
5428 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5435 if (intel_dp->reset_link_params) {
5436 /* Initial max link lane count */
5437 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5439 /* Initial max link rate */
5440 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5442 intel_dp->reset_link_params = false;
5445 intel_dp_print_rates(intel_dp);
5447 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5448 if (INTEL_GEN(dev_priv) >= 11)
5449 intel_dp_get_dsc_sink_cap(intel_dp);
5451 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5452 drm_dp_is_branch(intel_dp->dpcd));
5454 intel_dp_configure_mst(intel_dp);
5456 if (intel_dp->is_mst) {
5458 * If we are in MST mode then this connector
5459 * won't appear connected or have anything
5462 status = connector_status_disconnected;
5467 * Some external monitors do not signal loss of link synchronization
5468 * with an IRQ_HPD, so force a link status check.
5470 if (!intel_dp_is_edp(intel_dp)) {
5473 ret = intel_dp_retrain_link(encoder, ctx);
5475 intel_display_power_put(dev_priv, aux_domain, wakeref);
5481 * Clear the NACK and defer counts to get their exact values
5482 * while reading the EDID, which are required by Compliance tests
5483 * 4.2.2.4 and 4.2.2.5
5485 intel_dp->aux.i2c_nack_count = 0;
5486 intel_dp->aux.i2c_defer_count = 0;
5488 intel_dp_set_edid(intel_dp);
5489 if (intel_dp_is_edp(intel_dp) ||
5490 to_intel_connector(connector)->detect_edid)
5491 status = connector_status_connected;
5493 intel_dp_check_service_irq(intel_dp);
5496 if (status != connector_status_connected && !intel_dp->is_mst)
5497 intel_dp_unset_edid(intel_dp);
5499 intel_display_power_put(dev_priv, aux_domain, wakeref);
5504 intel_dp_force(struct drm_connector *connector)
5506 struct intel_dp *intel_dp = intel_attached_dp(connector);
5507 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5508 struct intel_encoder *intel_encoder = &dig_port->base;
5509 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5510 enum intel_display_power_domain aux_domain =
5511 intel_aux_power_domain(dig_port);
5512 intel_wakeref_t wakeref;
5514 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5515 connector->base.id, connector->name);
5516 intel_dp_unset_edid(intel_dp);
5518 if (connector->status != connector_status_connected)
5521 wakeref = intel_display_power_get(dev_priv, aux_domain);
5523 intel_dp_set_edid(intel_dp);
5525 intel_display_power_put(dev_priv, aux_domain, wakeref);
5528 static int intel_dp_get_modes(struct drm_connector *connector)
5530 struct intel_connector *intel_connector = to_intel_connector(connector);
5533 edid = intel_connector->detect_edid;
5535 int ret = intel_connector_update_modes(connector, edid);
5540 /* if eDP has no EDID, fall back to fixed mode */
5541 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5542 intel_connector->panel.fixed_mode) {
5543 struct drm_display_mode *mode;
5545 mode = drm_mode_duplicate(connector->dev,
5546 intel_connector->panel.fixed_mode);
5548 drm_mode_probed_add(connector, mode);
5557 intel_dp_connector_register(struct drm_connector *connector)
5559 struct intel_dp *intel_dp = intel_attached_dp(connector);
5560 struct drm_device *dev = connector->dev;
5563 ret = intel_connector_register(connector);
5567 i915_debugfs_connector_add(connector);
5569 DRM_DEBUG_KMS("registering %s bus for %s\n",
5570 intel_dp->aux.name, connector->kdev->kobj.name);
5572 intel_dp->aux.dev = connector->kdev;
5573 ret = drm_dp_aux_register(&intel_dp->aux);
5575 drm_dp_cec_register_connector(&intel_dp->aux,
5576 connector->name, dev->dev);
5581 intel_dp_connector_unregister(struct drm_connector *connector)
5583 struct intel_dp *intel_dp = intel_attached_dp(connector);
5585 drm_dp_cec_unregister_connector(&intel_dp->aux);
5586 drm_dp_aux_unregister(&intel_dp->aux);
5587 intel_connector_unregister(connector);
5590 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5592 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5593 struct intel_dp *intel_dp = &intel_dig_port->dp;
5595 intel_dp_mst_encoder_cleanup(intel_dig_port);
5596 if (intel_dp_is_edp(intel_dp)) {
5597 intel_wakeref_t wakeref;
5599 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5601 * vdd might still be enabled due to the delayed vdd off.
5602 * Make sure vdd is actually turned off here.
5604 with_pps_lock(intel_dp, wakeref)
5605 edp_panel_vdd_off_sync(intel_dp);
5607 if (intel_dp->edp_notifier.notifier_call) {
5608 unregister_reboot_notifier(&intel_dp->edp_notifier);
5609 intel_dp->edp_notifier.notifier_call = NULL;
5613 intel_dp_aux_fini(intel_dp);
5616 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5618 intel_dp_encoder_flush_work(encoder);
5620 drm_encoder_cleanup(encoder);
5621 kfree(enc_to_dig_port(encoder));
5624 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5626 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5627 intel_wakeref_t wakeref;
5629 if (!intel_dp_is_edp(intel_dp))
5633 * vdd might still be enabled due to the delayed vdd off.
5634 * Make sure vdd is actually turned off here.
5636 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5637 with_pps_lock(intel_dp, wakeref)
5638 edp_panel_vdd_off_sync(intel_dp);
5641 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5645 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5646 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5647 msecs_to_jiffies(timeout));
5650 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5654 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5657 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5658 static const struct drm_dp_aux_msg msg = {
5659 .request = DP_AUX_NATIVE_WRITE,
5660 .address = DP_AUX_HDCP_AKSV,
5661 .size = DRM_HDCP_KSV_LEN,
5663 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5667 /* Output An first, that's easy */
5668 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5669 an, DRM_HDCP_AN_LEN);
5670 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5671 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5673 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5677 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5678 * order to get it on the wire, we need to create the AUX header as if
5679 * we were writing the data, and then tickle the hardware to output the
5680 * data once the header is sent out.
5682 intel_dp_aux_header(txbuf, &msg);
5684 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5685 rxbuf, sizeof(rxbuf),
5686 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5688 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5690 } else if (ret == 0) {
5691 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5695 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5696 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5697 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5704 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5708 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5710 if (ret != DRM_HDCP_KSV_LEN) {
5711 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5712 return ret >= 0 ? -EIO : ret;
5717 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5722 * For some reason the HDMI and DP HDCP specs call this register
5723 * definition by different names. In the HDMI spec, it's called BSTATUS,
5724 * but in DP it's called BINFO.
5726 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5727 bstatus, DRM_HDCP_BSTATUS_LEN);
5728 if (ret != DRM_HDCP_BSTATUS_LEN) {
5729 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5730 return ret >= 0 ? -EIO : ret;
5736 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5741 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5744 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5745 return ret >= 0 ? -EIO : ret;
5752 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5753 bool *repeater_present)
5758 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5762 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5767 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5771 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5772 ri_prime, DRM_HDCP_RI_LEN);
5773 if (ret != DRM_HDCP_RI_LEN) {
5774 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5775 return ret >= 0 ? -EIO : ret;
5781 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5786 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5789 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5790 return ret >= 0 ? -EIO : ret;
5792 *ksv_ready = bstatus & DP_BSTATUS_READY;
5797 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5798 int num_downstream, u8 *ksv_fifo)
5803 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5804 for (i = 0; i < num_downstream; i += 3) {
5805 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5806 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5807 DP_AUX_HDCP_KSV_FIFO,
5808 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5811 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5813 return ret >= 0 ? -EIO : ret;
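/*
 * E.g. (illustrative): with num_downstream = 7 the loop above issues three
 * reads of the 15-byte KSV FIFO window: 15 bytes at i = 0, 15 bytes at
 * i = 3, and the remaining 5 bytes (one KSV) at i = 6.
 */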
5820 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5825 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5828 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5829 DP_AUX_HDCP_V_PRIME(i), part,
5830 DRM_HDCP_V_PRIME_PART_LEN);
5831 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5832 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5833 return ret >= 0 ? -EIO : ret;
5839 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5842 /* Not used for single stream DisplayPort setups */
5847 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5852 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5855 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5859 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5863 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5869 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5873 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5877 struct hdcp2_dp_errata_stream_type {
5882 static struct hdcp2_dp_msg_data {
5885 bool msg_detectable;
5887 u32 timeout2; /* Added for non_paired situation */
5888 } hdcp2_msg_data[] = {
5889 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
5890 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5891 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
5892 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5894 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5896 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5897 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5898 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
5899 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
5900 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5901 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
5902 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
5903 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5904 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
5905 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5907 {HDCP_2_2_REP_SEND_RECVID_LIST,
5908 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5909 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
5910 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5912 {HDCP_2_2_REP_STREAM_MANAGE,
5913 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5915 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5916 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
5917 /* local define to shovel this through the write_2_2 interface */
5918 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5919 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5920 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5925 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5930 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5931 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5932 HDCP_2_2_DP_RXSTATUS_LEN);
5933 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5934 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5935 return ret >= 0 ? -EIO : ret;
5942 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5943 u8 msg_id, bool *msg_ready)
5949 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5954 case HDCP_2_2_AKE_SEND_HPRIME:
5955 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5958 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5959 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5962 case HDCP_2_2_REP_SEND_RECVID_LIST:
5963 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5967 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5975 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5976 struct hdcp2_dp_msg_data *hdcp2_msg_data)
5978 struct intel_dp *dp = &intel_dig_port->dp;
5979 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5980 u8 msg_id = hdcp2_msg_data->msg_id;
5982 bool msg_ready = false;
5984 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5985 timeout = hdcp2_msg_data->timeout2;
5987 timeout = hdcp2_msg_data->timeout;
5990 * There is no way to detect the CERT, LPRIME and STREAM_READY
5991 * availability, so wait for the timeout and then read the msg.
5993 if (!hdcp2_msg_data->msg_detectable) {
5998 * As we want to check the msg availability at the timeout, ignore
5999 * the timeout of the wait for CP_IRQ.
6001 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6002 ret = hdcp2_detect_msg_availability(intel_dig_port,
6003 msg_id, &msg_ready);
6009 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6010 hdcp2_msg_data->msg_id, ret, timeout);
6015 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6019 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6020 if (hdcp2_msg_data[i].msg_id == msg_id)
6021 return &hdcp2_msg_data[i];
6027 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6028 void *buf, size_t size)
6030 struct intel_dp *dp = &intel_dig_port->dp;
6031 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6032 unsigned int offset;
6034 ssize_t ret, bytes_to_write, len;
6035 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6037 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6038 if (!hdcp2_msg_data)
6041 offset = hdcp2_msg_data->offset;
6043 /* No msg_id in DP HDCP2.2 msgs */
6044 bytes_to_write = size - 1;
6047 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6049 while (bytes_to_write) {
6050 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6051 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6053 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6054 offset, (void *)byte, len);
6058 bytes_to_write -= ret;
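/*
 * E.g. (illustrative): a 128-byte HDCP 2.2 message body goes out in 8 AUX
 * writes of DP_AUX_MAX_PAYLOAD_BYTES (16) bytes each, since a native AUX
 * transaction carries at most 16 data bytes.
 */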
6067 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6069 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6073 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6074 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6075 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6076 if (ret != HDCP_2_2_RXINFO_LEN)
6077 return ret >= 0 ? -EIO : ret;
6079 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6080 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6082 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6083 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6085 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6086 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6087 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
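/*
 * E.g. (illustrative): if RxInfo reports three downstream devices, the
 * fixed part of hdcp2_rep_send_receiverid_list is kept but the receiver ID
 * area shrinks from its maximum to 3 * HDCP_2_2_RECEIVER_ID_LEN
 * (3 * 5 = 15) bytes, so only the bytes the repeater actually sends are
 * read back.
 */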
6093 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6094 u8 msg_id, void *buf, size_t size)
6096 unsigned int offset;
6098 ssize_t ret, bytes_to_recv, len;
6099 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6101 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6102 if (!hdcp2_msg_data)
6104 offset = hdcp2_msg_data->offset;
6106 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6110 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6111 ret = get_receiver_id_list_size(intel_dig_port);
6117 bytes_to_recv = size - 1;
6119 /* DP adaptation msgs have no msg_id */
6122 while (bytes_to_recv) {
6123 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6124 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6126 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6129 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6133 bytes_to_recv -= ret;
6144 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6145 bool is_repeater, u8 content_type)
6147 struct hdcp2_dp_errata_stream_type stream_type_msg;
6153 * Errata for DP: As the stream type is used for encryption, the receiver
6154 * must be informed of the stream type for the decryption of the
6156 * A repeater will be informed of the stream type as a part of its
6157 * auth later in time.
6159 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6160 stream_type_msg.stream_type = content_type;
6162 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6163 sizeof(stream_type_msg));
6167 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6172 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6176 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6177 ret = HDCP_REAUTH_REQUEST;
6178 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6179 ret = HDCP_LINK_INTEGRITY_FAILURE;
6180 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6181 ret = HDCP_TOPOLOGY_CHANGE;
6187 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6194 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6195 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6196 rx_caps, HDCP_2_2_RXCAPS_LEN);
6197 if (ret != HDCP_2_2_RXCAPS_LEN)
6198 return ret >= 0 ? -EIO : ret;
6200 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6201 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6207 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6208 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6209 .read_bksv = intel_dp_hdcp_read_bksv,
6210 .read_bstatus = intel_dp_hdcp_read_bstatus,
6211 .repeater_present = intel_dp_hdcp_repeater_present,
6212 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6213 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6214 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6215 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6216 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6217 .check_link = intel_dp_hdcp_check_link,
6218 .hdcp_capable = intel_dp_hdcp_capable,
6219 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6220 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6221 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6222 .check_2_2_link = intel_dp_hdcp2_check_link,
6223 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6224 .protocol = HDCP_PROTOCOL_DP,
6227 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6229 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6230 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6232 lockdep_assert_held(&dev_priv->pps_mutex);
6234 if (!edp_have_panel_vdd(intel_dp))
6238 * The VDD bit needs a power domain reference, so if the bit is
6239 * already enabled when we boot or resume, grab this reference and
6240 * schedule a vdd off, so we don't hold on to the reference
6243 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6244 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6246 edp_panel_vdd_schedule_off(intel_dp);
6249 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6251 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6252 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6255 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6256 encoder->port, &pipe))
6259 return INVALID_PIPE;
6262 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6264 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6265 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6266 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6267 intel_wakeref_t wakeref;
6269 if (!HAS_DDI(dev_priv))
6270 intel_dp->DP = I915_READ(intel_dp->output_reg);
6273 lspcon_resume(lspcon);
6275 intel_dp->reset_link_params = true;
6277 with_pps_lock(intel_dp, wakeref) {
6278 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6279 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6281 if (intel_dp_is_edp(intel_dp)) {
6283 * Reinit the power sequencer, in case BIOS did
6284 * something nasty with it.
6286 intel_dp_pps_init(intel_dp);
6287 intel_edp_panel_vdd_sanitize(intel_dp);
6292 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6293 .force = intel_dp_force,
6294 .fill_modes = drm_helper_probe_single_connector_modes,
6295 .atomic_get_property = intel_digital_connector_atomic_get_property,
6296 .atomic_set_property = intel_digital_connector_atomic_set_property,
6297 .late_register = intel_dp_connector_register,
6298 .early_unregister = intel_dp_connector_unregister,
6299 .destroy = intel_connector_destroy,
6300 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6301 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6304 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6305 .detect_ctx = intel_dp_detect,
6306 .get_modes = intel_dp_get_modes,
6307 .mode_valid = intel_dp_mode_valid,
6308 .atomic_check = intel_digital_connector_atomic_check,
6311 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6312 .reset = intel_dp_encoder_reset,
6313 .destroy = intel_dp_encoder_destroy,
6317 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6319 struct intel_dp *intel_dp = &intel_dig_port->dp;
6320 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6321 enum irqreturn ret = IRQ_NONE;
6322 intel_wakeref_t wakeref;
6324 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6326 * vdd off can generate a long pulse on eDP which
6327 * would require vdd on to handle it, and thus we
6328 * would end up in an endless cycle of
6329 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6331 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6332 port_name(intel_dig_port->base.port));
6336 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6337 port_name(intel_dig_port->base.port),
6338 long_hpd ? "long" : "short");
6341 intel_dp->reset_link_params = true;
6345 wakeref = intel_display_power_get(dev_priv,
6346 intel_aux_power_domain(intel_dig_port));
6348 if (intel_dp->is_mst) {
6349 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6351 * If we were in MST mode, and the device is no longer
6352 * there, get out of MST mode.
6354 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6355 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6356 intel_dp->is_mst = false;
6357 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6363 if (!intel_dp->is_mst) {
6366 handled = intel_dp_short_pulse(intel_dp);
6375 intel_display_power_put(dev_priv,
6376 intel_aux_power_domain(intel_dig_port),
6382 /* check the VBT to see whether the eDP is on another port */
6383 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6386	 * eDP not supported on g4x. So bail out early just
6387	 * for a bit of extra safety in case the VBT is bonkers.
6389 if (INTEL_GEN(dev_priv) < 5)
6392 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6395 return intel_bios_is_port_edp(dev_priv, port);
6399 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6401 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6402 enum port port = dp_to_dig_port(intel_dp)->base.port;
6404 if (!IS_G4X(dev_priv) && port != PORT_A)
6405 intel_attach_force_audio_property(connector);
6407 intel_attach_broadcast_rgb_property(connector);
6408 if (HAS_GMCH(dev_priv))
6409 drm_connector_attach_max_bpc_property(connector, 6, 10);
6410 else if (INTEL_GEN(dev_priv) >= 5)
6411 drm_connector_attach_max_bpc_property(connector, 6, 12);
6413 if (intel_dp_is_edp(intel_dp)) {
6414 u32 allowed_scalers;
6416 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6417 if (!HAS_GMCH(dev_priv))
6418 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6420 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6422 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6427 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6429 intel_dp->panel_power_off_time = ktime_get_boottime();
6430 intel_dp->last_power_on = jiffies;
6431 intel_dp->last_backlight_off = jiffies;
6435 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6437 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6438 u32 pp_on, pp_off, pp_ctl;
6439 struct pps_registers regs;
6441	intel_pps_get_registers(intel_dp, &regs);
6443 pp_ctl = ironlake_get_pp_control(intel_dp);
6445 /* Ensure PPS is unlocked */
6446 if (!HAS_DDI(dev_priv))
6447 I915_WRITE(regs.pp_ctrl, pp_ctl);
6449 pp_on = I915_READ(regs.pp_on);
6450 pp_off = I915_READ(regs.pp_off);
6452 /* Pull timing values out of registers */
6453 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6454 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6455 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6456 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6458 if (i915_mmio_reg_valid(regs.pp_div)) {
6461 pp_div = I915_READ(regs.pp_div);
6463 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6465 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6470 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6472 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6474 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6478 intel_pps_verify_state(struct intel_dp *intel_dp)
6480 struct edp_power_seq hw;
6481 struct edp_power_seq *sw = &intel_dp->pps_delays;
6483 intel_pps_readout_hw_state(intel_dp, &hw);
6485 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6486 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6487 DRM_ERROR("PPS state mismatch\n");
6488 intel_pps_dump_state("sw", sw);
6489 intel_pps_dump_state("hw", &hw);
6494 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6496 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6497 struct edp_power_seq cur, vbt, spec,
6498 *final = &intel_dp->pps_delays;
6500 lockdep_assert_held(&dev_priv->pps_mutex);
6502 /* already initialized? */
6503 if (final->t11_t12 != 0)
6506 intel_pps_readout_hw_state(intel_dp, &cur);
6508 intel_pps_dump_state("cur", &cur);
6510 vbt = dev_priv->vbt.edp.pps;
6511 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
6512	 * of 500ms appears to be too short. Occasionally the panel
6513 * just fails to power back on. Increasing the delay to 800ms
6514 * seems sufficient to avoid this problem.
6516 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6517 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6518 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6521 /* T11_T12 delay is special and actually in units of 100ms, but zero
6522 * based in the hw (so we need to add 100 ms). But the sw vbt
6523	 * table multiplies it with 1000 to make it in units of 100usec, too.
6525 vbt.t11_t12 += 100 * 10;
6527 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6528 * our hw here, which are all in 100usec. */
6529 spec.t1_t3 = 210 * 10;
6530 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6531 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6532 spec.t10 = 500 * 10;
6533 /* This one is special and actually in units of 100ms, but zero
6534 * based in the hw (so we need to add 100 ms). But the sw vbt
6535	 * table multiplies it with 1000 to make it in units of 100usec, too.
6537 spec.t11_t12 = (510 + 100) * 10;
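	/*
	 * Worked example of the unit dance above: (510 ms + 100 ms) expressed
	 * in the hw's 100 usec units is 610 * 10 = 6100.
	 */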
6539 intel_pps_dump_state("vbt", &vbt);
6541 /* Use the max of the register settings and vbt. If both are
6542 * unset, fall back to the spec limits. */
6543 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
6544				       spec.field : \
6545				       max(cur.field, vbt.field))
6546	assign_final(t1_t3);
6547	assign_final(t8);
6548	assign_final(t9);
6549	assign_final(t10);
6550	assign_final(t11_t12);
6553 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
6554 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6555 intel_dp->backlight_on_delay = get_delay(t8);
6556 intel_dp->backlight_off_delay = get_delay(t9);
6557 intel_dp->panel_power_down_delay = get_delay(t10);
6558 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
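	/*
	 * Worked example (illustrative value only): a t1_t3 of 2100 in the
	 * hw's 100 usec units is DIV_ROUND_UP(2100, 10) = 210, i.e. a
	 * 210 ms panel power up delay.
	 */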
6561 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6562 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6563 intel_dp->panel_power_cycle_delay);
6565 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6566 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6569 * We override the HW backlight delays to 1 because we do manual waits
6570 * on them. For T8, even BSpec recommends doing it. For T9, if we
6571 * don't do this, we'll end up waiting for the backlight off delay
6572 * twice: once when we do the manual sleep, and once when we disable
6573 * the panel and wait for the PP_STATUS bit to become zero.
6579	 * HW has only a 100msec granularity for t11_t12 so round it up accordingly.
6582 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
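	/*
	 * For example (illustrative value only), t11_t12 = 6100 (610 ms) is
	 * not a multiple of the 100 ms granularity, so roundup(6100, 1000)
	 * yields 7000, i.e. 700 ms.
	 */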
6586 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6587 bool force_disable_vdd)
6589 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6590 u32 pp_on, pp_off, port_sel = 0;
6591 int div = dev_priv->rawclk_freq / 1000;
6592 struct pps_registers regs;
6593 enum port port = dp_to_dig_port(intel_dp)->base.port;
6594 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6596 lockdep_assert_held(&dev_priv->pps_mutex);
6598	intel_pps_get_registers(intel_dp, &regs);
6601 * On some VLV machines the BIOS can leave the VDD
6602 * enabled even on power sequencers which aren't
6603 * hooked up to any port. This would mess up the
6604 * power domain tracking the first time we pick
6605 * one of these power sequencers for use since
6606 * edp_panel_vdd_on() would notice that the VDD was
6607 * already on and therefore wouldn't grab the power
6608 * domain reference. Disable VDD first to avoid this.
6609 * This also avoids spuriously turning the VDD on as
6610 * soon as the new power sequencer gets initialized.
6612 if (force_disable_vdd) {
6613 u32 pp = ironlake_get_pp_control(intel_dp);
6615 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6617 if (pp & EDP_FORCE_VDD)
6618 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6620 pp &= ~EDP_FORCE_VDD;
6622 I915_WRITE(regs.pp_ctrl, pp);
6625 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6626 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6627 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6628 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6630 /* Haswell doesn't have any port selection bits for the panel
6631 * power sequencer any more. */
6632 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6633 port_sel = PANEL_PORT_SELECT_VLV(port);
6634 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6637 port_sel = PANEL_PORT_SELECT_DPA;
6640 port_sel = PANEL_PORT_SELECT_DPC;
6643 port_sel = PANEL_PORT_SELECT_DPD;
6653 I915_WRITE(regs.pp_on, pp_on);
6654 I915_WRITE(regs.pp_off, pp_off);
6657 * Compute the divisor for the pp clock, simply match the Bspec formula.
6659 if (i915_mmio_reg_valid(regs.pp_div)) {
6660 I915_WRITE(regs.pp_div,
6661 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6662 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
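		/*
		 * Worked example (assuming rawclk_freq is in kHz): with a
		 * 24 MHz raw clock, div = 24000 / 1000 = 24, so the reference
		 * divider field above is (100 * 24) / 2 - 1 = 1199.
		 */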
6666 pp_ctl = I915_READ(regs.pp_ctrl);
6667 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6668 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6669 I915_WRITE(regs.pp_ctrl, pp_ctl);
6672 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6673 I915_READ(regs.pp_on),
6674 I915_READ(regs.pp_off),
6675 i915_mmio_reg_valid(regs.pp_div) ?
6676 I915_READ(regs.pp_div) :
6677 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6680 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6682 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6684 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6685 vlv_initial_power_sequencer_setup(intel_dp);
6687 intel_dp_init_panel_power_sequencer(intel_dp);
6688 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6693 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6694 * @dev_priv: i915 device
6695 * @crtc_state: a pointer to the active intel_crtc_state
6696 * @refresh_rate: RR to be programmed
6698 * This function gets called when refresh rate (RR) has to be changed from
6699 * one frequency to another. Switches can be between high and low RR
6700 * supported by the panel or to any other RR based on media playback (in
6701 * this case, RR value needs to be passed from user space).
6703	 * The caller of this function needs to hold dev_priv->drrs.mutex.
6705 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6706 const struct intel_crtc_state *crtc_state,
6709 struct intel_encoder *encoder;
6710 struct intel_digital_port *dig_port = NULL;
6711 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6712 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6713 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6715 if (refresh_rate <= 0) {
6716 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6720 if (intel_dp == NULL) {
6721 DRM_DEBUG_KMS("DRRS not supported.\n");
6725 dig_port = dp_to_dig_port(intel_dp);
6726 encoder = &dig_port->base;
6729 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6733 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6734 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6738 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6740 index = DRRS_LOW_RR;
6742 if (index == dev_priv->drrs.refresh_rate_type) {
6744 "DRRS requested for previously set RR...ignoring\n");
6748 if (!crtc_state->base.active) {
6749 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6753 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6756 intel_dp_set_m_n(crtc_state, M1_N1);
6759 intel_dp_set_m_n(crtc_state, M2_N2);
6763 DRM_ERROR("Unsupported refreshrate type\n");
6765 } else if (INTEL_GEN(dev_priv) > 6) {
6766 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6769 val = I915_READ(reg);
6770 if (index > DRRS_HIGH_RR) {
6771 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6772 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6774 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6776 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6777 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6779 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6781 I915_WRITE(reg, val);
6784 dev_priv->drrs.refresh_rate_type = index;
6786 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6790 * intel_edp_drrs_enable - init drrs struct if supported
6791 * @intel_dp: DP struct
6792 * @crtc_state: A pointer to the active crtc state.
6794 * Initializes frontbuffer_bits and drrs.dp
6796 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6797 const struct intel_crtc_state *crtc_state)
6799 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6801 if (!crtc_state->has_drrs) {
6802 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6806 if (dev_priv->psr.enabled) {
6807 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6811 mutex_lock(&dev_priv->drrs.mutex);
6812 if (dev_priv->drrs.dp) {
6813 DRM_DEBUG_KMS("DRRS already enabled\n");
6817 dev_priv->drrs.busy_frontbuffer_bits = 0;
6819 dev_priv->drrs.dp = intel_dp;
6822 mutex_unlock(&dev_priv->drrs.mutex);
6826 * intel_edp_drrs_disable - Disable DRRS
6827 * @intel_dp: DP struct
6828 * @old_crtc_state: Pointer to old crtc_state.
6831 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6832 const struct intel_crtc_state *old_crtc_state)
6834 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6836 if (!old_crtc_state->has_drrs)
6839 mutex_lock(&dev_priv->drrs.mutex);
6840 if (!dev_priv->drrs.dp) {
6841 mutex_unlock(&dev_priv->drrs.mutex);
6845 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6846 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6847 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6849 dev_priv->drrs.dp = NULL;
6850 mutex_unlock(&dev_priv->drrs.mutex);
6852 cancel_delayed_work_sync(&dev_priv->drrs.work);
6855 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6857 struct drm_i915_private *dev_priv =
6858 container_of(work, typeof(*dev_priv), drrs.work.work);
6859 struct intel_dp *intel_dp;
6861 mutex_lock(&dev_priv->drrs.mutex);
6863 intel_dp = dev_priv->drrs.dp;
6869	 * The delayed work can race with an invalidate, hence we need to recheck.
6873 if (dev_priv->drrs.busy_frontbuffer_bits)
6876 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6877 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6879 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6880 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6884 mutex_unlock(&dev_priv->drrs.mutex);
6888 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6889 * @dev_priv: i915 device
6890 * @frontbuffer_bits: frontbuffer plane tracking bits
6892 * This function gets called every time rendering on the given planes starts.
6893 * Hence DRRS needs to be upclocked, i.e. switched from LOW_RR to HIGH_RR.
6895 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6897 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6898 unsigned int frontbuffer_bits)
6900 struct drm_crtc *crtc;
6903 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6906 cancel_delayed_work(&dev_priv->drrs.work);
6908 mutex_lock(&dev_priv->drrs.mutex);
6909 if (!dev_priv->drrs.dp) {
6910 mutex_unlock(&dev_priv->drrs.mutex);
6914 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6915 pipe = to_intel_crtc(crtc)->pipe;
6917 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6918 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6920 /* invalidate means busy screen hence upclock */
6921 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6922 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6923 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6925 mutex_unlock(&dev_priv->drrs.mutex);
6929 * intel_edp_drrs_flush - Restart Idleness DRRS
6930 * @dev_priv: i915 device
6931 * @frontbuffer_bits: frontbuffer plane tracking bits
6933 * This function gets called every time rendering on the given planes has
6934 * completed or flip on a crtc is completed. So DRRS should be upclocked
6935 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
6936 * if no other planes are dirty.
6938 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6940 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6941 unsigned int frontbuffer_bits)
6943 struct drm_crtc *crtc;
6946 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6949 cancel_delayed_work(&dev_priv->drrs.work);
6951 mutex_lock(&dev_priv->drrs.mutex);
6952 if (!dev_priv->drrs.dp) {
6953 mutex_unlock(&dev_priv->drrs.mutex);
6957 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6958 pipe = to_intel_crtc(crtc)->pipe;
6960 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6961 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6963 /* flush means busy screen hence upclock */
6964 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6965 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6966 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6969 * flush also means no more activity hence schedule downclock, if all
6970 * other fbs are quiescent too
6972 if (!dev_priv->drrs.busy_frontbuffer_bits)
6973 schedule_delayed_work(&dev_priv->drrs.work,
6974 msecs_to_jiffies(1000));
6975 mutex_unlock(&dev_priv->drrs.mutex);
6979 * DOC: Display Refresh Rate Switching (DRRS)
6981 * Display Refresh Rate Switching (DRRS) is a power conservation feature
6982 * which enables switching dynamically between low and high refresh
6983 * rates, based on the usage scenario. This feature is applicable
6984 * to internal panels.
6986 * Indication that the panel supports DRRS is given by the panel EDID, which
6987 * would list multiple refresh rates for one resolution.
6989 * DRRS is of 2 types - static and seamless.
6990 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6991 * (may appear as a blink on screen) and is used in dock/undock scenarios.
6992 * Seamless DRRS involves changing RR without any visual effect to the user
6993 * and can be used during normal system usage. This is done by programming
6994 * certain registers.
6996 * Support for static/seamless DRRS may be indicated in the VBT based on
6997 * inputs from the panel spec.
6999 * DRRS saves power by switching to low RR based on usage scenarios.
7001 * The implementation is based on frontbuffer tracking. When
7002 * there is a disturbance on the screen triggered by user activity or a periodic
7003 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7004 * no movement on screen, after a timeout of 1 second, a switch to low RR is made.
7007 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7008 * and intel_edp_drrs_flush() are called.
7010 * DRRS can be further extended to support other internal panels and also
7011 * the scenario of video playback wherein RR is set based on the rate
7012 * requested by userspace.
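 *
 * Illustrative usage from a frontbuffer tracking hook (a sketch only; the
 * actual callers live in the frontbuffer tracking code):
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... rendering or a flip happens ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * The invalidate call forces the high refresh rate while the screen is
 * busy, and the flush call re-arms the 1 second idleness work that
 * eventually switches back to the low refresh rate.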
7016 * intel_dp_drrs_init - Init basic DRRS work and mutex.
7017 * @connector: eDP connector
7018 * @fixed_mode: preferred mode of panel
7020 * This function is called only once at driver load to initialize basic DRRS state.
7024 * Downclock mode if panel supports it, else return NULL.
7025 * DRRS support is determined by the presence of downclock mode (apart
7026 * from VBT setting).
7028 static struct drm_display_mode *
7029 intel_dp_drrs_init(struct intel_connector *connector,
7030 struct drm_display_mode *fixed_mode)
7032 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7033 struct drm_display_mode *downclock_mode = NULL;
7035 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7036 mutex_init(&dev_priv->drrs.mutex);
7038 if (INTEL_GEN(dev_priv) <= 6) {
7039 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
7043 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7044 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
7048 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7049 if (!downclock_mode) {
7050 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
7054 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7056 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7057 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
7058 return downclock_mode;
7061 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7062 struct intel_connector *intel_connector)
7064 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7065 struct drm_device *dev = &dev_priv->drm;
7066 struct drm_connector *connector = &intel_connector->base;
7067 struct drm_display_mode *fixed_mode = NULL;
7068 struct drm_display_mode *downclock_mode = NULL;
7070 enum pipe pipe = INVALID_PIPE;
7071 intel_wakeref_t wakeref;
7074 if (!intel_dp_is_edp(intel_dp))
7077 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7080 * On IBX/CPT we may get here with LVDS already registered. Since the
7081 * driver uses the only internal power sequencer available for both
7082	 * eDP and LVDS, bail out early in this case to prevent interfering
7083 * with an already powered-on LVDS power sequencer.
7085 if (intel_get_lvds_encoder(dev_priv)) {
7086 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7087 DRM_INFO("LVDS was detected, not registering eDP\n");
7092 with_pps_lock(intel_dp, wakeref) {
7093 intel_dp_init_panel_power_timestamps(intel_dp);
7094 intel_dp_pps_init(intel_dp);
7095 intel_edp_panel_vdd_sanitize(intel_dp);
7098 /* Cache DPCD and EDID for edp. */
7099 has_dpcd = intel_edp_init_dpcd(intel_dp);
7102 /* if this fails, presume the device is a ghost */
7103 DRM_INFO("failed to retrieve link info, disabling eDP\n");
7107 mutex_lock(&dev->mode_config.mutex);
7108 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7110 if (drm_add_edid_modes(connector, edid)) {
7111 drm_connector_update_edid_property(connector,
7115 edid = ERR_PTR(-EINVAL);
7118 edid = ERR_PTR(-ENOENT);
7120 intel_connector->edid = edid;
7122 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7124 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7126 /* fallback to VBT if available for eDP */
7128 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7129 mutex_unlock(&dev->mode_config.mutex);
7131 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7132 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7133 register_reboot_notifier(&intel_dp->edp_notifier);
7136 * Figure out the current pipe for the initial backlight setup.
7137 * If the current pipe isn't valid, try the PPS pipe, and if that
7138 * fails just assume pipe A.
7140 pipe = vlv_active_pipe(intel_dp);
7142 if (pipe != PIPE_A && pipe != PIPE_B)
7143 pipe = intel_dp->pps_pipe;
7145 if (pipe != PIPE_A && pipe != PIPE_B)
7148 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
7152 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7153 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7154 intel_panel_setup_backlight(connector, pipe);
7157 drm_connector_init_panel_orientation_property(
7158 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
7163 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7165	 * vdd might still be enabled due to the delayed vdd off.
7166 * Make sure vdd is actually turned off here.
7168 with_pps_lock(intel_dp, wakeref)
7169 edp_panel_vdd_off_sync(intel_dp);
7174 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7176 struct intel_connector *intel_connector;
7177 struct drm_connector *connector;
7179 intel_connector = container_of(work, typeof(*intel_connector),
7180 modeset_retry_work);
7181 connector = &intel_connector->base;
7182 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7185 /* Grab the locks before changing connector property*/
7186 mutex_lock(&connector->dev->mode_config.mutex);
7187 /* Set connector link status to BAD and send a Uevent to notify
7188 * userspace to do a modeset.
7190 drm_connector_set_link_status_property(connector,
7191 DRM_MODE_LINK_STATUS_BAD);
7192 mutex_unlock(&connector->dev->mode_config.mutex);
7193 /* Send Hotplug uevent so userspace can reprobe */
7194 drm_kms_helper_hotplug_event(connector->dev);
7198 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7199 struct intel_connector *intel_connector)
7201 struct drm_connector *connector = &intel_connector->base;
7202 struct intel_dp *intel_dp = &intel_dig_port->dp;
7203 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7204 struct drm_device *dev = intel_encoder->base.dev;
7205 struct drm_i915_private *dev_priv = to_i915(dev);
7206 enum port port = intel_encoder->port;
7209 /* Initialize the work for modeset in case of link train failure */
7210 INIT_WORK(&intel_connector->modeset_retry_work,
7211 intel_dp_modeset_retry_work_fn);
7213 if (WARN(intel_dig_port->max_lanes < 1,
7214 "Not enough lanes (%d) for DP on port %c\n",
7215 intel_dig_port->max_lanes, port_name(port)))
7218 intel_dp_set_source_rates(intel_dp);
7220 intel_dp->reset_link_params = true;
7221 intel_dp->pps_pipe = INVALID_PIPE;
7222 intel_dp->active_pipe = INVALID_PIPE;
7224 /* intel_dp vfuncs */
7225 if (HAS_DDI(dev_priv))
7226 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
7228 /* Preserve the current hw state. */
7229 intel_dp->DP = I915_READ(intel_dp->output_reg);
7230 intel_dp->attached_connector = intel_connector;
7232 if (intel_dp_is_port_edp(dev_priv, port))
7233 type = DRM_MODE_CONNECTOR_eDP;
7235 type = DRM_MODE_CONNECTOR_DisplayPort;
7237 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7238 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7241 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7242 * for DP the encoder type can be set by the caller to
7243 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7245 if (type == DRM_MODE_CONNECTOR_eDP)
7246 intel_encoder->type = INTEL_OUTPUT_EDP;
7248 /* eDP only on port B and/or C on vlv/chv */
7249 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7250 intel_dp_is_edp(intel_dp) &&
7251 port != PORT_B && port != PORT_C))
7254 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
7255 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7258 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7259 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7261 if (!HAS_GMCH(dev_priv))
7262 connector->interlace_allowed = true;
7263 connector->doublescan_allowed = 0;
7265 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7267 intel_dp_aux_init(intel_dp);
7269 intel_connector_attach_encoder(intel_connector, intel_encoder);
7271 if (HAS_DDI(dev_priv))
7272 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7274 intel_connector->get_hw_state = intel_connector_get_hw_state;
7276 /* init MST on ports that can support it */
7277 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
7278 (port == PORT_B || port == PORT_C ||
7279 port == PORT_D || port == PORT_F))
7280 intel_dp_mst_encoder_init(intel_dig_port,
7281 intel_connector->base.base.id);
7283 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7284 intel_dp_aux_fini(intel_dp);
7285 intel_dp_mst_encoder_cleanup(intel_dig_port);
7289 intel_dp_add_properties(intel_dp, connector);
7291 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7292 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7294 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
7297 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
7298 * 0xd. Failure to do so will result in spurious interrupts being
7299 * generated on the port when a cable is not attached.
7301 if (IS_G45(dev_priv)) {
7302 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
7303 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
7309 drm_connector_cleanup(connector);
7314 bool intel_dp_init(struct drm_i915_private *dev_priv,
7315 i915_reg_t output_reg,
7318 struct intel_digital_port *intel_dig_port;
7319 struct intel_encoder *intel_encoder;
7320 struct drm_encoder *encoder;
7321 struct intel_connector *intel_connector;
7323 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7324 if (!intel_dig_port)
7327 intel_connector = intel_connector_alloc();
7328 if (!intel_connector)
7329 goto err_connector_alloc;
7331 intel_encoder = &intel_dig_port->base;
7332 encoder = &intel_encoder->base;
7334 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7335 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7336 "DP %c", port_name(port)))
7337 goto err_encoder_init;
7339 intel_encoder->hotplug = intel_dp_hotplug;
7340 intel_encoder->compute_config = intel_dp_compute_config;
7341 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7342 intel_encoder->get_config = intel_dp_get_config;
7343 intel_encoder->update_pipe = intel_panel_update_backlight;
7344 intel_encoder->suspend = intel_dp_encoder_suspend;
7345 if (IS_CHERRYVIEW(dev_priv)) {
7346 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7347 intel_encoder->pre_enable = chv_pre_enable_dp;
7348 intel_encoder->enable = vlv_enable_dp;
7349 intel_encoder->disable = vlv_disable_dp;
7350 intel_encoder->post_disable = chv_post_disable_dp;
7351 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7352 } else if (IS_VALLEYVIEW(dev_priv)) {
7353 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7354 intel_encoder->pre_enable = vlv_pre_enable_dp;
7355 intel_encoder->enable = vlv_enable_dp;
7356 intel_encoder->disable = vlv_disable_dp;
7357 intel_encoder->post_disable = vlv_post_disable_dp;
7359 intel_encoder->pre_enable = g4x_pre_enable_dp;
7360 intel_encoder->enable = g4x_enable_dp;
7361 intel_encoder->disable = g4x_disable_dp;
7362 intel_encoder->post_disable = g4x_post_disable_dp;
7365 intel_dig_port->dp.output_reg = output_reg;
7366 intel_dig_port->max_lanes = 4;
7368 intel_encoder->type = INTEL_OUTPUT_DP;
7369 intel_encoder->power_domain = intel_port_to_power_domain(port);
7370 if (IS_CHERRYVIEW(dev_priv)) {
7372 intel_encoder->crtc_mask = 1 << 2;
7374 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7376 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7378 intel_encoder->cloneable = 0;
7379 intel_encoder->port = port;
7381 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7384 intel_infoframe_init(intel_dig_port);
7386 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7387 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7388 goto err_init_connector;
7393 drm_encoder_cleanup(encoder);
7395 kfree(intel_connector);
7396 err_connector_alloc:
7397 kfree(intel_dig_port);
7401 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7403 struct intel_encoder *encoder;
7405 for_each_intel_encoder(&dev_priv->drm, encoder) {
7406 struct intel_dp *intel_dp;
7408 if (encoder->type != INTEL_OUTPUT_DDI)
7411 intel_dp = enc_to_intel_dp(&encoder->base);
7413 if (!intel_dp->can_mst)
7416 if (intel_dp->is_mst)
7417 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7421 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7423 struct intel_encoder *encoder;
7425 for_each_intel_encoder(&dev_priv->drm, encoder) {
7426 struct intel_dp *intel_dp;
7429 if (encoder->type != INTEL_OUTPUT_DDI)
7432 intel_dp = enc_to_intel_dp(&encoder->base);
7434 if (!intel_dp->can_mst)
7437 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7439 intel_dp->is_mst = false;
7440 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,