/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
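/*
 * Editorial note on the value above (worked out from the comment, not
 * taken from Bspec): 2 FIFOs * 640 * 6 bytes = 7680 bytes = 61440 bits,
 * so the define appears to be expressed in bits of joiner RAM.
 */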
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976
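/*
 * Sanity check on the factor above (editorial note): (100 - 2.4)/100 =
 * 0.976, stored here scaled by 1000 as 976; users of the factor are
 * expected to compensate for the x1000 scaling.
 */
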
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires fractional division to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
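/*
 * Worked example of the fixed-point m2 encoding in the table above
 * (editorial note): for 162000 kHz, (32 << 22) | 1677722 =
 * 0x8000000 | 0x19999a = 0x819999a; for 270000 kHz, (27 << 22) | 0 =
 * 0x6c00000.
 */
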
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = 0;
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
				DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
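/*
 * Example of the math above (editorial note): a 148500 kHz pixel clock at
 * 24 bpp needs DIV_ROUND_UP(148500 * 24, 8) = 445500 kB/s of link bandwidth.
 */
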
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
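/*
 * Example (editorial note): HBR2 has LS_Clk = 540000 kHz, so a 4-lane HBR2
 * link yields 540000 * 4 = 2160000 kB/s of payload bandwidth; the 8b/10b
 * channel-coding overhead is already excluded by working in LS_Clk units.
 */
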
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
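	/*
	 * Editorial example: a sink reporting 0x10E (16.875 in 6.4 fixed
	 * point) yields an integer output bpp of 16 after the shift.
	 */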
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}
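/*
 * Packing example (editorial note): src = { 0x12, 0x34, 0x56 } packs
 * big-endian into the high bytes, giving v = 0x12345600.
 */
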
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
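/*
 * Illustrative usage of the macro above (mirrors callers later in this
 * file, e.g. edp_notify_handler()):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... access PPS registers with pps_mutex held ...
 *	}
 */
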
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
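/*
 * Worked example for the bitmask logic above (editorial note): if PIPE_A's
 * sequencer is already claimed, pipes == 0b10, so ffs(pipes) - 1 ==
 * 2 - 1 == 1, i.e. PIPE_B is returned.
 */
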
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
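/*
 * Worked example of the header layout above (editorial note): a native
 * AUX read (request 0x9) of 16 bytes at DPCD address 0x00200 yields
 * txbuf[] = { 0x90, 0x02, 0x00, 0x0f }, since byte 3 carries "size - 1".
 */
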
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value was computed assuming RGB format; for YCbCr 4:2:0
	 * output, the number of bytes per pixel is half that of RGB.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
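/*
 * Example of the selection above (editorial note): if the sink reports
 * supported input bpcs of {12, 10, 8}, scanned in that order, and
 * dsc_max_bpc is 10, the first entry satisfying the limit is 10, so the
 * function returns 10 * 3 = 30 bpp.
 */
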
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}

int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}

	return 0;
}

static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	int ret;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
					       pipe_config);
	if (ret)
		return ret;

	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc_params.compression_enable)
		output_bpp = pipe_config->dsc_params.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(output_bpp,
				       pipe_config->lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2,
				       constant_n);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}

2272 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2273 int link_rate, u8 lane_count,
2276 intel_dp->link_trained = false;
2277 intel_dp->link_rate = link_rate;
2278 intel_dp->lane_count = lane_count;
2279 intel_dp->link_mst = link_mst;
2282 static void intel_dp_prepare(struct intel_encoder *encoder,
2283 const struct intel_crtc_state *pipe_config)
2285 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2286 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2287 enum port port = encoder->port;
2288 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2289 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2291 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2292 pipe_config->lane_count,
2293 intel_crtc_has_type(pipe_config,
2294 INTEL_OUTPUT_DP_MST));
2297 * There are four kinds of DP registers:
2298 *
2299 * IBX PCH
2300 * SNB CPU
2301 * IVB CPU
2302 * CPT PCH
2303 *
2304 * IBX PCH and CPU are the same for almost everything,
2305 * except that the CPU DP PLL is configured in this
2308 * CPT PCH is quite different, having many bits moved
2309 * to the TRANS_DP_CTL register instead. That
2310 * configuration happens (oddly) in ironlake_pch_enable
2313 /* Preserve the BIOS-computed detected bit. This is
2314 * supposed to be read-only.
2316 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2318 /* Handle DP bits in common between all three register formats */
2319 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2320 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2322 /* Split out the IBX/CPU vs CPT settings */
2324 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2325 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2326 intel_dp->DP |= DP_SYNC_HS_HIGH;
2327 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2328 intel_dp->DP |= DP_SYNC_VS_HIGH;
2329 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2331 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2332 intel_dp->DP |= DP_ENHANCED_FRAMING;
2334 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2335 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2338 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2340 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2341 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2342 trans_dp |= TRANS_DP_ENH_FRAMING;
2344 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2345 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2347 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2348 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2350 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2351 intel_dp->DP |= DP_SYNC_HS_HIGH;
2352 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2353 intel_dp->DP |= DP_SYNC_VS_HIGH;
2354 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2356 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2357 intel_dp->DP |= DP_ENHANCED_FRAMING;
2359 if (IS_CHERRYVIEW(dev_priv))
2360 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2362 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
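/* To summarize the pipe select variants above: IVB port A has its own
 * field, CPT routes the selection through TRANS_DP_CTL instead, and
 * CHV grew a wider field so that pipe C can be addressed. */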
2366 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2367 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2369 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2370 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2372 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2373 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
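/* wait_panel_status() below spins until (PP_STATUS & mask) == value, so
 * e.g. IDLE_ON_MASK/IDLE_ON_VALUE waits for the power sequencer to
 * settle in the "on, idle" state. */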
2375 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2377 static void wait_panel_status(struct intel_dp *intel_dp,
2381 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2382 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2384 lockdep_assert_held(&dev_priv->pps_mutex);
2386 intel_pps_verify_state(intel_dp);
2388 pp_stat_reg = _pp_stat_reg(intel_dp);
2389 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2391 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2393 I915_READ(pp_stat_reg),
2394 I915_READ(pp_ctrl_reg));
2396 if (intel_wait_for_register(&dev_priv->uncore,
2397 pp_stat_reg, mask, value,
2399 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2400 I915_READ(pp_stat_reg),
2401 I915_READ(pp_ctrl_reg));
2403 DRM_DEBUG_KMS("Wait complete\n");
2406 static void wait_panel_on(struct intel_dp *intel_dp)
2408 DRM_DEBUG_KMS("Wait for panel power on\n");
2409 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2412 static void wait_panel_off(struct intel_dp *intel_dp)
2414 DRM_DEBUG_KMS("Wait for panel power off time\n");
2415 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2418 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2420 ktime_t panel_power_on_time;
2421 s64 panel_power_off_duration;
2423 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2425 /* Take the difference of the current time and the panel power off time
2426 * and then make the panel wait for t11_t12 if needed. */
2427 panel_power_on_time = ktime_get_boottime();
2428 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2430 /* When we disable the VDD override bit last we have to do the manual
2431 * wait. */
2432 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2433 wait_remaining_ms_from_jiffies(jiffies,
2434 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
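/* Example: with a 500 ms panel_power_cycle_delay (T11+T12) and only
 * 200 ms elapsed since the last power off, the call above sleeps for
 * the remaining ~300 ms before the panel may power back up. */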
2436 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2439 static void wait_backlight_on(struct intel_dp *intel_dp)
2441 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2442 intel_dp->backlight_on_delay);
2445 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2447 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2448 intel_dp->backlight_off_delay);
2451 /* Read the current pp_control value, unlocking the register if it
2452 * is locked. */
2455 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2457 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2460 lockdep_assert_held(&dev_priv->pps_mutex);
2462 control = I915_READ(_pp_ctrl_reg(intel_dp));
2463 if (WARN_ON(!HAS_DDI(dev_priv) &&
2464 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2465 control &= ~PANEL_UNLOCK_MASK;
2466 control |= PANEL_UNLOCK_REGS;
2472 * Must be paired with edp_panel_vdd_off().
2473 * Must hold pps_mutex around the whole on/off sequence.
2474 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2476 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2478 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2481 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2482 bool need_to_disable = !intel_dp->want_panel_vdd;
2484 lockdep_assert_held(&dev_priv->pps_mutex);
2486 if (!intel_dp_is_edp(intel_dp))
2489 cancel_delayed_work(&intel_dp->panel_vdd_work);
2490 intel_dp->want_panel_vdd = true;
2492 if (edp_have_panel_vdd(intel_dp))
2493 return need_to_disable;
2495 intel_display_power_get(dev_priv,
2496 intel_aux_power_domain(intel_dig_port));
2498 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2499 port_name(intel_dig_port->base.port));
2501 if (!edp_have_panel_power(intel_dp))
2502 wait_panel_power_cycle(intel_dp);
2504 pp = ironlake_get_pp_control(intel_dp);
2505 pp |= EDP_FORCE_VDD;
2507 pp_stat_reg = _pp_stat_reg(intel_dp);
2508 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2510 I915_WRITE(pp_ctrl_reg, pp);
2511 POSTING_READ(pp_ctrl_reg);
2512 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2513 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2515 /* If the panel wasn't on, delay before accessing the aux channel. */
2517 if (!edp_have_panel_power(intel_dp)) {
2518 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2519 port_name(intel_dig_port->base.port));
2520 msleep(intel_dp->panel_power_up_delay);
2523 return need_to_disable;
2527 * Must be paired with intel_edp_panel_vdd_off() or
2528 * intel_edp_panel_off().
2529 * Nested calls to these functions are not allowed since
2530 * we drop the lock. Caller must use some higher level
2531 * locking to prevent nested calls from other threads.
2533 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2535 intel_wakeref_t wakeref;
2538 if (!intel_dp_is_edp(intel_dp))
2542 with_pps_lock(intel_dp, wakeref)
2543 vdd = edp_panel_vdd_on(intel_dp);
2544 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2545 port_name(dp_to_dig_port(intel_dp)->base.port));
2548 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2550 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2551 struct intel_digital_port *intel_dig_port =
2552 dp_to_dig_port(intel_dp);
2554 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2556 lockdep_assert_held(&dev_priv->pps_mutex);
2558 WARN_ON(intel_dp->want_panel_vdd);
2560 if (!edp_have_panel_vdd(intel_dp))
2563 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2564 port_name(intel_dig_port->base.port));
2566 pp = ironlake_get_pp_control(intel_dp);
2567 pp &= ~EDP_FORCE_VDD;
2569 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2570 pp_stat_reg = _pp_stat_reg(intel_dp);
2572 I915_WRITE(pp_ctrl_reg, pp);
2573 POSTING_READ(pp_ctrl_reg);
2575 /* Make sure sequencer is idle before allowing subsequent activity */
2576 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2577 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2579 if ((pp & PANEL_POWER_ON) == 0)
2580 intel_dp->panel_power_off_time = ktime_get_boottime();
2582 intel_display_power_put_unchecked(dev_priv,
2583 intel_aux_power_domain(intel_dig_port));
2586 static void edp_panel_vdd_work(struct work_struct *__work)
2588 struct intel_dp *intel_dp =
2589 container_of(to_delayed_work(__work),
2590 struct intel_dp, panel_vdd_work);
2591 intel_wakeref_t wakeref;
2593 with_pps_lock(intel_dp, wakeref) {
2594 if (!intel_dp->want_panel_vdd)
2595 edp_panel_vdd_off_sync(intel_dp);
2599 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2601 unsigned long delay;
2604 * Queue the timer to fire a long time from now (relative to the power
2605 * down delay) to keep the panel power up across a sequence of
2606 * operations. */
2608 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2609 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
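/* E.g. with a 500 ms power cycle delay the work fires after ~2.5 s, so
 * VDD stays up across a burst of back-to-back AUX transfers instead of
 * power cycling between each one. */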
2613 * Must be paired with edp_panel_vdd_on().
2614 * Must hold pps_mutex around the whole on/off sequence.
2615 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2617 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2619 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2621 lockdep_assert_held(&dev_priv->pps_mutex);
2623 if (!intel_dp_is_edp(intel_dp))
2626 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2627 port_name(dp_to_dig_port(intel_dp)->base.port));
2629 intel_dp->want_panel_vdd = false;
2632 edp_panel_vdd_off_sync(intel_dp);
2634 edp_panel_vdd_schedule_off(intel_dp);
2637 static void edp_panel_on(struct intel_dp *intel_dp)
2639 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2641 i915_reg_t pp_ctrl_reg;
2643 lockdep_assert_held(&dev_priv->pps_mutex);
2645 if (!intel_dp_is_edp(intel_dp))
2648 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2649 port_name(dp_to_dig_port(intel_dp)->base.port));
2651 if (WARN(edp_have_panel_power(intel_dp),
2652 "eDP port %c panel power already on\n",
2653 port_name(dp_to_dig_port(intel_dp)->base.port)))
2656 wait_panel_power_cycle(intel_dp);
2658 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2659 pp = ironlake_get_pp_control(intel_dp);
2660 if (IS_GEN(dev_priv, 5)) {
2661 /* ILK workaround: disable reset around power sequence */
2662 pp &= ~PANEL_POWER_RESET;
2663 I915_WRITE(pp_ctrl_reg, pp);
2664 POSTING_READ(pp_ctrl_reg);
2667 pp |= PANEL_POWER_ON;
2668 if (!IS_GEN(dev_priv, 5))
2669 pp |= PANEL_POWER_RESET;
2671 I915_WRITE(pp_ctrl_reg, pp);
2672 POSTING_READ(pp_ctrl_reg);
2674 wait_panel_on(intel_dp);
2675 intel_dp->last_power_on = jiffies;
2677 if (IS_GEN(dev_priv, 5)) {
2678 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2679 I915_WRITE(pp_ctrl_reg, pp);
2680 POSTING_READ(pp_ctrl_reg);
2684 void intel_edp_panel_on(struct intel_dp *intel_dp)
2686 intel_wakeref_t wakeref;
2688 if (!intel_dp_is_edp(intel_dp))
2691 with_pps_lock(intel_dp, wakeref)
2692 edp_panel_on(intel_dp);
2696 static void edp_panel_off(struct intel_dp *intel_dp)
2698 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2699 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2701 i915_reg_t pp_ctrl_reg;
2703 lockdep_assert_held(&dev_priv->pps_mutex);
2705 if (!intel_dp_is_edp(intel_dp))
2708 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2709 port_name(dig_port->base.port));
2711 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2712 port_name(dig_port->base.port));
2714 pp = ironlake_get_pp_control(intel_dp);
2715 /* We need to switch off panel power _and_ force vdd, for otherwise some
2716 * panels get very unhappy and cease to work. */
2717 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2720 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2722 intel_dp->want_panel_vdd = false;
2724 I915_WRITE(pp_ctrl_reg, pp);
2725 POSTING_READ(pp_ctrl_reg);
2727 wait_panel_off(intel_dp);
2728 intel_dp->panel_power_off_time = ktime_get_boottime();
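/* Recorded so that wait_panel_power_cycle() can honour the T11+T12
 * delay before the next panel power on. */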
2730 /* We got a reference when we enabled the VDD. */
2731 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2734 void intel_edp_panel_off(struct intel_dp *intel_dp)
2736 intel_wakeref_t wakeref;
2738 if (!intel_dp_is_edp(intel_dp))
2741 with_pps_lock(intel_dp, wakeref)
2742 edp_panel_off(intel_dp);
2745 /* Enable backlight in the panel power control. */
2746 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2748 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2749 intel_wakeref_t wakeref;
2752 * If we enable the backlight right away following a panel power
2753 * on, we may see slight flicker as the panel syncs with the eDP
2754 * link. So delay a bit to make sure the image is solid before
2755 * allowing it to appear.
2757 wait_backlight_on(intel_dp);
2759 with_pps_lock(intel_dp, wakeref) {
2760 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2763 pp = ironlake_get_pp_control(intel_dp);
2764 pp |= EDP_BLC_ENABLE;
2766 I915_WRITE(pp_ctrl_reg, pp);
2767 POSTING_READ(pp_ctrl_reg);
2771 /* Enable backlight PWM and backlight PP control. */
2772 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2773 const struct drm_connector_state *conn_state)
2775 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2777 if (!intel_dp_is_edp(intel_dp))
2780 DRM_DEBUG_KMS("\n");
2782 intel_panel_enable_backlight(crtc_state, conn_state);
2783 _intel_edp_backlight_on(intel_dp);
2786 /* Disable backlight in the panel power control. */
2787 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2789 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2790 intel_wakeref_t wakeref;
2792 if (!intel_dp_is_edp(intel_dp))
2795 with_pps_lock(intel_dp, wakeref) {
2796 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2799 pp = ironlake_get_pp_control(intel_dp);
2800 pp &= ~EDP_BLC_ENABLE;
2802 I915_WRITE(pp_ctrl_reg, pp);
2803 POSTING_READ(pp_ctrl_reg);
2806 intel_dp->last_backlight_off = jiffies;
2807 edp_wait_backlight_off(intel_dp);
2810 /* Disable backlight PP control and backlight PWM. */
2811 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2813 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2815 if (!intel_dp_is_edp(intel_dp))
2818 DRM_DEBUG_KMS("\n");
2820 _intel_edp_backlight_off(intel_dp);
2821 intel_panel_disable_backlight(old_conn_state);
2825 * Hook for controlling the panel power control backlight through the bl_power
2826 * sysfs attribute. Take care to handle multiple calls.
2828 static void intel_edp_backlight_power(struct intel_connector *connector,
2831 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2832 intel_wakeref_t wakeref;
2836 with_pps_lock(intel_dp, wakeref)
2837 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2838 if (is_enabled == enable)
2841 DRM_DEBUG_KMS("panel power control backlight %s\n",
2842 enable ? "enable" : "disable");
2845 _intel_edp_backlight_on(intel_dp);
2847 _intel_edp_backlight_off(intel_dp);
2850 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2852 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2853 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2854 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2856 I915_STATE_WARN(cur_state != state,
2857 "DP port %c state assertion failure (expected %s, current %s)\n",
2858 port_name(dig_port->base.port),
2859 onoff(state), onoff(cur_state));
2861 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2863 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2865 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2867 I915_STATE_WARN(cur_state != state,
2868 "eDP PLL state assertion failure (expected %s, current %s)\n",
2869 onoff(state), onoff(cur_state));
2871 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2872 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2874 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2875 const struct intel_crtc_state *pipe_config)
2877 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2878 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2880 assert_pipe_disabled(dev_priv, crtc->pipe);
2881 assert_dp_port_disabled(intel_dp);
2882 assert_edp_pll_disabled(dev_priv);
2884 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2885 pipe_config->port_clock);
2887 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2889 if (pipe_config->port_clock == 162000)
2890 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2892 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2894 I915_WRITE(DP_A, intel_dp->DP);
2899 * [DevILK] Workaround required when enabling DP PLL
2900 * while a pipe is enabled going to FDI:
2901 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2902 * 2. Program DP PLL enable
2904 if (IS_GEN(dev_priv, 5))
2905 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2907 intel_dp->DP |= DP_PLL_ENABLE;
2909 I915_WRITE(DP_A, intel_dp->DP);
2914 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2915 const struct intel_crtc_state *old_crtc_state)
2917 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2918 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2920 assert_pipe_disabled(dev_priv, crtc->pipe);
2921 assert_dp_port_disabled(intel_dp);
2922 assert_edp_pll_enabled(dev_priv);
2924 DRM_DEBUG_KMS("disabling eDP PLL\n");
2926 intel_dp->DP &= ~DP_PLL_ENABLE;
2928 I915_WRITE(DP_A, intel_dp->DP);
2933 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2936 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2937 * be capable of signalling downstream hpd with a long pulse.
2938 * Whether or not that means D3 is safe to use is not clear,
2939 * but let's assume so until proven otherwise.
2941 * FIXME should really check all downstream ports...
2943 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2944 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2945 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2948 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2949 const struct intel_crtc_state *crtc_state,
2954 if (!crtc_state->dsc_params.compression_enable)
2957 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2958 enable ? DP_DECOMPRESSION_EN : 0);
2960 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2961 enable ? "enable" : "disable");
2964 /* If the sink supports it, try to set the power state appropriately */
2965 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2969 /* Should have a valid DPCD by this point */
2970 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2973 if (mode != DRM_MODE_DPMS_ON) {
2974 if (downstream_hpd_needs_d0(intel_dp))
2977 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2980 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2983 * When turning on, we need to retry for 1ms to give the sink
2984 * time to wake up. */
2986 for (i = 0; i < 3; i++) {
2987 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2994 if (ret == 1 && lspcon->active)
2995 lspcon_wait_pcon_mode(lspcon);
2999 DRM_DEBUG_KMS("failed to %s sink power state\n",
3000 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3003 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3004 enum port port, enum pipe *pipe)
3008 for_each_pipe(dev_priv, p) {
3009 u32 val = I915_READ(TRANS_DP_CTL(p));
3011 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3017 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3019 /* must initialize pipe to something for the asserts */
3025 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3026 i915_reg_t dp_reg, enum port port,
3032 val = I915_READ(dp_reg);
3034 ret = val & DP_PORT_EN;
3036 /* asserts want to know the pipe even if the port is disabled */
3037 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3038 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3039 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3040 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3041 else if (IS_CHERRYVIEW(dev_priv))
3042 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3044 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3049 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3052 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3053 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3054 intel_wakeref_t wakeref;
3057 wakeref = intel_display_power_get_if_enabled(dev_priv,
3058 encoder->power_domain);
3062 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3063 encoder->port, pipe);
3065 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3070 static void intel_dp_get_config(struct intel_encoder *encoder,
3071 struct intel_crtc_state *pipe_config)
3073 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3074 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3076 enum port port = encoder->port;
3077 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3079 if (encoder->type == INTEL_OUTPUT_EDP)
3080 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3082 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3084 tmp = I915_READ(intel_dp->output_reg);
3086 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3088 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3089 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3091 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3092 flags |= DRM_MODE_FLAG_PHSYNC;
3094 flags |= DRM_MODE_FLAG_NHSYNC;
3096 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3097 flags |= DRM_MODE_FLAG_PVSYNC;
3099 flags |= DRM_MODE_FLAG_NVSYNC;
3101 if (tmp & DP_SYNC_HS_HIGH)
3102 flags |= DRM_MODE_FLAG_PHSYNC;
3104 flags |= DRM_MODE_FLAG_NHSYNC;
3106 if (tmp & DP_SYNC_VS_HIGH)
3107 flags |= DRM_MODE_FLAG_PVSYNC;
3109 flags |= DRM_MODE_FLAG_NVSYNC;
3112 pipe_config->base.adjusted_mode.flags |= flags;
3114 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3115 pipe_config->limited_color_range = true;
3117 pipe_config->lane_count =
3118 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3120 intel_dp_get_m_n(crtc, pipe_config);
3122 if (port == PORT_A) {
3123 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3124 pipe_config->port_clock = 162000;
3126 pipe_config->port_clock = 270000;
3129 pipe_config->base.adjusted_mode.crtc_clock =
3130 intel_dotclock_calculate(pipe_config->port_clock,
3131 &pipe_config->dp_m_n);
3133 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3134 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3136 * This is a big fat ugly hack.
3138 * Some machines in UEFI boot mode provide us a VBT that has 18
3139 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3140 * unknown we fail to light up. Yet the same BIOS boots up with
3141 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3142 * max, not what it tells us to use.
3144 * Note: This will still be broken if the eDP panel is not lit
3145 * up by the BIOS, and thus we can't get the mode at module
3146 * load. */
3148 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3149 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3150 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3154 static void intel_disable_dp(struct intel_encoder *encoder,
3155 const struct intel_crtc_state *old_crtc_state,
3156 const struct drm_connector_state *old_conn_state)
3158 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3160 intel_dp->link_trained = false;
3162 if (old_crtc_state->has_audio)
3163 intel_audio_codec_disable(encoder,
3164 old_crtc_state, old_conn_state);
3166 /* Make sure the panel is off before trying to change the mode. But also
3167 * ensure that we have vdd while we switch off the panel. */
3168 intel_edp_panel_vdd_on(intel_dp);
3169 intel_edp_backlight_off(old_conn_state);
3170 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3171 intel_edp_panel_off(intel_dp);
3174 static void g4x_disable_dp(struct intel_encoder *encoder,
3175 const struct intel_crtc_state *old_crtc_state,
3176 const struct drm_connector_state *old_conn_state)
3178 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3181 static void vlv_disable_dp(struct intel_encoder *encoder,
3182 const struct intel_crtc_state *old_crtc_state,
3183 const struct drm_connector_state *old_conn_state)
3185 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3188 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3189 const struct intel_crtc_state *old_crtc_state,
3190 const struct drm_connector_state *old_conn_state)
3192 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3193 enum port port = encoder->port;
3196 * Bspec does not list a specific disable sequence for g4x DP.
3197 * Follow the ilk+ sequence (disable pipe before the port) for
3198 * g4x DP as it does not suffer from underruns like the normal
3199 * g4x modeset sequence (disable pipe after the port).
3201 intel_dp_link_down(encoder, old_crtc_state);
3203 /* Only ilk+ has port A */
3205 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3208 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3209 const struct intel_crtc_state *old_crtc_state,
3210 const struct drm_connector_state *old_conn_state)
3212 intel_dp_link_down(encoder, old_crtc_state);
3215 static void chv_post_disable_dp(struct intel_encoder *encoder,
3216 const struct intel_crtc_state *old_crtc_state,
3217 const struct drm_connector_state *old_conn_state)
3219 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3221 intel_dp_link_down(encoder, old_crtc_state);
3223 vlv_dpio_get(dev_priv);
3225 /* Assert data lane reset */
3226 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3228 vlv_dpio_put(dev_priv);
3232 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3236 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3237 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3238 enum port port = intel_dig_port->base.port;
3239 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3241 if (dp_train_pat & train_pat_mask)
3242 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3243 dp_train_pat & train_pat_mask);
3245 if (HAS_DDI(dev_priv)) {
3246 u32 temp = I915_READ(DP_TP_CTL(port));
3248 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3249 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3251 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3253 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3254 switch (dp_train_pat & train_pat_mask) {
3255 case DP_TRAINING_PATTERN_DISABLE:
3256 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3259 case DP_TRAINING_PATTERN_1:
3260 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3262 case DP_TRAINING_PATTERN_2:
3263 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3265 case DP_TRAINING_PATTERN_3:
3266 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3268 case DP_TRAINING_PATTERN_4:
3269 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3272 I915_WRITE(DP_TP_CTL(port), temp);
3274 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3275 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3276 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3278 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3279 case DP_TRAINING_PATTERN_DISABLE:
3280 *DP |= DP_LINK_TRAIN_OFF_CPT;
3282 case DP_TRAINING_PATTERN_1:
3283 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3285 case DP_TRAINING_PATTERN_2:
3286 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3288 case DP_TRAINING_PATTERN_3:
3289 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3290 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3295 *DP &= ~DP_LINK_TRAIN_MASK;
3297 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3298 case DP_TRAINING_PATTERN_DISABLE:
3299 *DP |= DP_LINK_TRAIN_OFF;
3301 case DP_TRAINING_PATTERN_1:
3302 *DP |= DP_LINK_TRAIN_PAT_1;
3304 case DP_TRAINING_PATTERN_2:
3305 *DP |= DP_LINK_TRAIN_PAT_2;
3307 case DP_TRAINING_PATTERN_3:
3308 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3309 *DP |= DP_LINK_TRAIN_PAT_2;
3315 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3316 const struct intel_crtc_state *old_crtc_state)
3318 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3320 /* enable with pattern 1 (as per spec) */
3322 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3325 * Magic for VLV/CHV. We _must_ first set up the register
3326 * without actually enabling the port, and then do another
3327 * write to enable the port. Otherwise link training will
3328 * fail when the power sequencer is freshly used for this port.
3330 intel_dp->DP |= DP_PORT_EN;
3331 if (old_crtc_state->has_audio)
3332 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3334 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3335 POSTING_READ(intel_dp->output_reg);
3338 static void intel_enable_dp(struct intel_encoder *encoder,
3339 const struct intel_crtc_state *pipe_config,
3340 const struct drm_connector_state *conn_state)
3342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3343 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3344 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3345 u32 dp_reg = I915_READ(intel_dp->output_reg);
3346 enum pipe pipe = crtc->pipe;
3347 intel_wakeref_t wakeref;
3349 if (WARN_ON(dp_reg & DP_PORT_EN))
3352 with_pps_lock(intel_dp, wakeref) {
3353 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3354 vlv_init_panel_power_sequencer(encoder, pipe_config);
3356 intel_dp_enable_port(intel_dp, pipe_config);
3358 edp_panel_vdd_on(intel_dp);
3359 edp_panel_on(intel_dp);
3360 edp_panel_vdd_off(intel_dp, true);
3363 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3364 unsigned int lane_mask = 0x0;
3366 if (IS_CHERRYVIEW(dev_priv))
3367 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3369 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3373 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3374 intel_dp_start_link_train(intel_dp);
3375 intel_dp_stop_link_train(intel_dp);
3377 if (pipe_config->has_audio) {
3378 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3380 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3384 static void g4x_enable_dp(struct intel_encoder *encoder,
3385 const struct intel_crtc_state *pipe_config,
3386 const struct drm_connector_state *conn_state)
3388 intel_enable_dp(encoder, pipe_config, conn_state);
3389 intel_edp_backlight_on(pipe_config, conn_state);
3392 static void vlv_enable_dp(struct intel_encoder *encoder,
3393 const struct intel_crtc_state *pipe_config,
3394 const struct drm_connector_state *conn_state)
3396 intel_edp_backlight_on(pipe_config, conn_state);
3399 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3400 const struct intel_crtc_state *pipe_config,
3401 const struct drm_connector_state *conn_state)
3403 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3404 enum port port = encoder->port;
3406 intel_dp_prepare(encoder, pipe_config);
3408 /* Only ilk+ has port A */
3410 ironlake_edp_pll_on(intel_dp, pipe_config);
3413 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3415 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3416 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3417 enum pipe pipe = intel_dp->pps_pipe;
3418 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3420 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3422 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3425 edp_panel_vdd_off_sync(intel_dp);
3428 * VLV seems to get confused when multiple power sequencers
3429 * have the same port selected (even if only one has power/vdd
3430 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3431 * CHV, on the other hand, doesn't seem to mind having the same port
3432 * selected in multiple power sequencers, but let's always clear the
3433 * port select when logically disconnecting a power sequencer
3434 * from a port. */
3436 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3437 pipe_name(pipe), port_name(intel_dig_port->base.port));
3438 I915_WRITE(pp_on_reg, 0);
3439 POSTING_READ(pp_on_reg);
3441 intel_dp->pps_pipe = INVALID_PIPE;
3444 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3447 struct intel_encoder *encoder;
3449 lockdep_assert_held(&dev_priv->pps_mutex);
3451 for_each_intel_dp(&dev_priv->drm, encoder) {
3452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3453 enum port port = encoder->port;
3455 WARN(intel_dp->active_pipe == pipe,
3456 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3457 pipe_name(pipe), port_name(port));
3459 if (intel_dp->pps_pipe != pipe)
3462 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3463 pipe_name(pipe), port_name(port));
3465 /* make sure vdd is off before we steal it */
3466 vlv_detach_power_sequencer(intel_dp);
3470 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3471 const struct intel_crtc_state *crtc_state)
3473 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3474 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3475 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3477 lockdep_assert_held(&dev_priv->pps_mutex);
3479 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3481 if (intel_dp->pps_pipe != INVALID_PIPE &&
3482 intel_dp->pps_pipe != crtc->pipe) {
3484 * If another power sequencer was being used on this
3485 * port previously make sure to turn off vdd there while
3486 * we still have control of it.
3488 vlv_detach_power_sequencer(intel_dp);
3492 * We may be stealing the power
3493 * sequencer from another port.
3495 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3497 intel_dp->active_pipe = crtc->pipe;
3499 if (!intel_dp_is_edp(intel_dp))
3502 /* now it's all ours */
3503 intel_dp->pps_pipe = crtc->pipe;
3505 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3506 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3508 /* init power sequencer on this pipe and port */
3509 intel_dp_init_panel_power_sequencer(intel_dp);
3510 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3513 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3514 const struct intel_crtc_state *pipe_config,
3515 const struct drm_connector_state *conn_state)
3517 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3519 intel_enable_dp(encoder, pipe_config, conn_state);
3522 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3523 const struct intel_crtc_state *pipe_config,
3524 const struct drm_connector_state *conn_state)
3526 intel_dp_prepare(encoder, pipe_config);
3528 vlv_phy_pre_pll_enable(encoder, pipe_config);
3531 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3532 const struct intel_crtc_state *pipe_config,
3533 const struct drm_connector_state *conn_state)
3535 chv_phy_pre_encoder_enable(encoder, pipe_config);
3537 intel_enable_dp(encoder, pipe_config, conn_state);
3539 /* Second common lane will stay alive on its own now */
3540 chv_phy_release_cl2_override(encoder);
3543 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3544 const struct intel_crtc_state *pipe_config,
3545 const struct drm_connector_state *conn_state)
3547 intel_dp_prepare(encoder, pipe_config);
3549 chv_phy_pre_pll_enable(encoder, pipe_config);
3552 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3553 const struct intel_crtc_state *old_crtc_state,
3554 const struct drm_connector_state *old_conn_state)
3556 chv_phy_post_pll_disable(encoder, old_crtc_state);
3560 * Fetch AUX CH registers 0x202 - 0x207 which contain
3561 * link status information
3564 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3566 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3567 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
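/* The raw status fetched above is typically fed to the DRM helpers
 * drm_dp_clock_recovery_ok() and drm_dp_channel_eq_ok() to decide
 * whether to adjust vswing/pre-emphasis or drop to a lower link rate. */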
3570 /* These are source-specific values. */
3572 intel_dp_voltage_max(struct intel_dp *intel_dp)
3574 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3575 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3576 enum port port = encoder->port;
3578 if (HAS_DDI(dev_priv))
3579 return intel_ddi_dp_voltage_max(encoder);
3580 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3581 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3582 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3583 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3584 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3585 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3587 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3591 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3593 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3594 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3595 enum port port = encoder->port;
3597 if (HAS_DDI(dev_priv)) {
3598 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3599 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3600 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3601 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3602 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3604 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3605 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3606 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3609 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3611 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3612 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3613 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3614 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3616 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3617 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3619 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3622 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3623 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3624 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3625 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3626 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3627 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3628 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3629 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3631 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3636 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3638 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3639 unsigned long demph_reg_value, preemph_reg_value,
3640 uniqtranscale_reg_value;
3641 u8 train_set = intel_dp->train_set[0];
3643 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3644 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3645 preemph_reg_value = 0x0004000;
3646 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3648 demph_reg_value = 0x2B405555;
3649 uniqtranscale_reg_value = 0x552AB83A;
3651 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3652 demph_reg_value = 0x2B404040;
3653 uniqtranscale_reg_value = 0x5548B83A;
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3656 demph_reg_value = 0x2B245555;
3657 uniqtranscale_reg_value = 0x5560B83A;
3659 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3660 demph_reg_value = 0x2B405555;
3661 uniqtranscale_reg_value = 0x5598DA3A;
3667 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3668 preemph_reg_value = 0x0002000;
3669 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3670 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3671 demph_reg_value = 0x2B404040;
3672 uniqtranscale_reg_value = 0x5552B83A;
3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3675 demph_reg_value = 0x2B404848;
3676 uniqtranscale_reg_value = 0x5580B83A;
3678 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3679 demph_reg_value = 0x2B404040;
3680 uniqtranscale_reg_value = 0x55ADDA3A;
3686 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3687 preemph_reg_value = 0x0000000;
3688 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3689 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3690 demph_reg_value = 0x2B305555;
3691 uniqtranscale_reg_value = 0x5570B83A;
3693 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3694 demph_reg_value = 0x2B2B4040;
3695 uniqtranscale_reg_value = 0x55ADDA3A;
3701 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3702 preemph_reg_value = 0x0006000;
3703 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3704 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3705 demph_reg_value = 0x1B405555;
3706 uniqtranscale_reg_value = 0x55ADDA3A;
3716 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3717 uniqtranscale_reg_value, 0);
3722 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3724 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3725 u32 deemph_reg_value, margin_reg_value;
3726 bool uniq_trans_scale = false;
3727 u8 train_set = intel_dp->train_set[0];
3729 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3730 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3731 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3732 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3733 deemph_reg_value = 128;
3734 margin_reg_value = 52;
3736 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3737 deemph_reg_value = 128;
3738 margin_reg_value = 77;
3740 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3741 deemph_reg_value = 128;
3742 margin_reg_value = 102;
3744 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3745 deemph_reg_value = 128;
3746 margin_reg_value = 154;
3747 uniq_trans_scale = true;
3753 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3754 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3755 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3756 deemph_reg_value = 85;
3757 margin_reg_value = 78;
3759 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3760 deemph_reg_value = 85;
3761 margin_reg_value = 116;
3763 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3764 deemph_reg_value = 85;
3765 margin_reg_value = 154;
3771 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3772 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3773 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3774 deemph_reg_value = 64;
3775 margin_reg_value = 104;
3777 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3778 deemph_reg_value = 64;
3779 margin_reg_value = 154;
3785 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3786 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3787 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3788 deemph_reg_value = 43;
3789 margin_reg_value = 154;
3799 chv_set_phy_signal_level(encoder, deemph_reg_value,
3800 margin_reg_value, uniq_trans_scale);
3806 g4x_signal_levels(u8 train_set)
3808 u32 signal_levels = 0;
3810 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3811 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3813 signal_levels |= DP_VOLTAGE_0_4;
3815 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3816 signal_levels |= DP_VOLTAGE_0_6;
3818 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3819 signal_levels |= DP_VOLTAGE_0_8;
3821 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3822 signal_levels |= DP_VOLTAGE_1_2;
3825 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3826 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3828 signal_levels |= DP_PRE_EMPHASIS_0;
3830 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3831 signal_levels |= DP_PRE_EMPHASIS_3_5;
3833 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3834 signal_levels |= DP_PRE_EMPHASIS_6;
3836 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3837 signal_levels |= DP_PRE_EMPHASIS_9_5;
3840 return signal_levels;
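/* For example, a train_set requesting voltage swing level 1 with
 * pre-emphasis level 1 maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5
 * above, i.e. 0.6 V swing with 3.5 dB of pre-emphasis. */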
3843 /* SNB CPU eDP voltage swing and pre-emphasis control */
3845 snb_cpu_edp_signal_levels(u8 train_set)
3847 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3848 DP_TRAIN_PRE_EMPHASIS_MASK);
3849 switch (signal_levels) {
3850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3851 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3852 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3854 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3855 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3857 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3860 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3863 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3865 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3866 "0x%x\n", signal_levels);
3867 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3871 /* IVB CPU eDP voltage swing and pre-emphasis control */
3873 ivb_cpu_edp_signal_levels(u8 train_set)
3875 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3876 DP_TRAIN_PRE_EMPHASIS_MASK);
3877 switch (signal_levels) {
3878 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3879 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3880 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3881 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3882 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3883 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3886 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3887 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3888 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3890 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3891 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3892 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3893 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3896 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3897 "0x%x\n", signal_levels);
3898 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3903 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3905 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3906 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3907 enum port port = intel_dig_port->base.port;
3908 u32 signal_levels, mask = 0;
3909 u8 train_set = intel_dp->train_set[0];
3911 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3912 signal_levels = bxt_signal_levels(intel_dp);
3913 } else if (HAS_DDI(dev_priv)) {
3914 signal_levels = ddi_signal_levels(intel_dp);
3915 mask = DDI_BUF_EMP_MASK;
3916 } else if (IS_CHERRYVIEW(dev_priv)) {
3917 signal_levels = chv_signal_levels(intel_dp);
3918 } else if (IS_VALLEYVIEW(dev_priv)) {
3919 signal_levels = vlv_signal_levels(intel_dp);
3920 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3921 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3922 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3923 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3924 signal_levels = snb_cpu_edp_signal_levels(train_set);
3925 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3927 signal_levels = g4x_signal_levels(train_set);
3928 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3932 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3934 DRM_DEBUG_KMS("Using vswing level %d\n",
3935 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3936 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3937 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3938 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3940 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3942 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3943 POSTING_READ(intel_dp->output_reg);
3947 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3950 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3951 struct drm_i915_private *dev_priv =
3952 to_i915(intel_dig_port->base.base.dev);
3954 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3956 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3957 POSTING_READ(intel_dp->output_reg);
3960 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3962 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3963 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3964 enum port port = intel_dig_port->base.port;
3967 if (!HAS_DDI(dev_priv))
3970 val = I915_READ(DP_TP_CTL(port));
3971 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3972 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3973 I915_WRITE(DP_TP_CTL(port), val);
3976 * On PORT_A we can have only eDP in SST mode. There the only reason
3977 * we need to set idle transmission mode is to work around a HW issue
3978 * where we enable the pipe while not in idle link-training mode.
3979 * In this case there is a requirement to wait for a minimum number of
3980 * idle patterns to be sent.
3985 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3986 DP_TP_STATUS_IDLE_DONE,
3987 DP_TP_STATUS_IDLE_DONE,
3989 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3993 intel_dp_link_down(struct intel_encoder *encoder,
3994 const struct intel_crtc_state *old_crtc_state)
3996 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3997 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3998 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3999 enum port port = encoder->port;
4000 u32 DP = intel_dp->DP;
4002 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4005 DRM_DEBUG_KMS("\n");
4007 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4008 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4009 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4010 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4012 DP &= ~DP_LINK_TRAIN_MASK;
4013 DP |= DP_LINK_TRAIN_PAT_IDLE;
4015 I915_WRITE(intel_dp->output_reg, DP);
4016 POSTING_READ(intel_dp->output_reg);
4018 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4019 I915_WRITE(intel_dp->output_reg, DP);
4020 POSTING_READ(intel_dp->output_reg);
4023 * HW workaround for IBX, we need to move the port
4024 * to transcoder A after disabling it to allow the
4025 * matching HDMI port to be enabled on transcoder A.
4027 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4029 * We get CPU/PCH FIFO underruns on the other pipe when
4030 * doing the workaround. Sweep them under the rug.
4032 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4033 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4035 /* always enable with pattern 1 (as per spec) */
4036 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4037 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4038 DP_LINK_TRAIN_PAT_1;
4039 I915_WRITE(intel_dp->output_reg, DP);
4040 POSTING_READ(intel_dp->output_reg);
4043 I915_WRITE(intel_dp->output_reg, DP);
4044 POSTING_READ(intel_dp->output_reg);
4046 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4047 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4048 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4051 msleep(intel_dp->panel_power_down_delay);
4055 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4056 intel_wakeref_t wakeref;
4058 with_pps_lock(intel_dp, wakeref)
4059 intel_dp->active_pipe = INVALID_PIPE;
4064 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4069 * Prior to DP 1.3 the bit represented by
4070 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4071 * If it is set, DP_DPCD_REV at 0000h may report a value less than
4072 * the true capability of the panel. The only way to check is to
4073 * read and compare 0000h and 2200h.
4074 */
4075 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4076 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4079 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4080 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4081 DRM_ERROR("DPCD failed read at extended capabilities\n");
4085 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4086 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4090 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4093 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4094 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4096 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
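/* E.g. a sink whose DPCD at 0000h still claims rev 1.2 but has the
 * extended-cap bit set may expose its real DP 1.4 capabilities at
 * 2200h; after the memcpy above the rest of the driver simply sees
 * the extended register set. */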
4100 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4102 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4103 sizeof(intel_dp->dpcd)) < 0)
4104 return false; /* aux transfer failed */
4106 intel_dp_extended_receiver_capabilities(intel_dp);
4108 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4110 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4113 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4117 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4120 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4123 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4126 * Clear the cached register set to avoid using stale values
4127 * for the sinks that do not support DSC.
4129 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4131 /* Clear fec_capable to avoid using stale values */
4132 intel_dp->fec_capable = 0;
4134 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4135 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4136 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4137 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4139 sizeof(intel_dp->dsc_dpcd)) < 0)
4140 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4143 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4144 (int)sizeof(intel_dp->dsc_dpcd),
4145 intel_dp->dsc_dpcd);
4147 /* FEC is supported only on DP 1.4 */
4148 if (!intel_dp_is_edp(intel_dp) &&
4149 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4150 &intel_dp->fec_capable) < 0)
4151 DRM_ERROR("Failed to read FEC DPCD register\n");
4153 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4158 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4160 struct drm_i915_private *dev_priv =
4161 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4163 /* this function is meant to be called only once */
4164 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4166 if (!intel_dp_read_dpcd(intel_dp))
4169 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4170 drm_dp_is_branch(intel_dp->dpcd));
4172 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4173 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4174 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4177 * Read the eDP display control registers.
4179 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4180 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4181 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4182 * method). The display control registers should read zero if they're
4183 * not supported anyway.
4185 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4186 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4187 sizeof(intel_dp->edp_dpcd))
4188 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4189 intel_dp->edp_dpcd);
4192 * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
4193 * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4195 intel_psr_init_dpcd(intel_dp);
4197 /* Read the eDP 1.4+ supported link rates. */
4198 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4199 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4202 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4203 sink_rates, sizeof(sink_rates));
4205 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4206 int val = le16_to_cpu(sink_rates[i]);
4211 /* Value read multiplied by 200kHz gives the per-lane
4212 * link rate in kHz. The source rates are, however,
4213 * stored in terms of LS_Clk kHz. The full conversion
4214 * back to symbols is
4215 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4217 intel_dp->sink_rates[i] = (val * 200) / 10;
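/*
 * Worked example (illustrative): a sink advertising HBR2 stores 27000
 * here (27000 * 200 kHz = 5.4 Gbps per lane), and the conversion above
 * yields (27000 * 200) / 10 = 540000, the LS_Clk-based kHz value used
 * throughout this driver.
 */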
4219 intel_dp->num_sink_rates = i;
4223 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4224 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4226 if (intel_dp->num_sink_rates)
4227 intel_dp->use_rate_select = true;
4229 intel_dp_set_sink_rates(intel_dp);
4231 intel_dp_set_common_rates(intel_dp);
4233 /* Read the eDP DSC DPCD registers */
4234 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4235 intel_dp_get_dsc_sink_cap(intel_dp);
4242 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4244 if (!intel_dp_read_dpcd(intel_dp))
4247 /* Don't clobber cached eDP rates. */
4248 if (!intel_dp_is_edp(intel_dp)) {
4249 intel_dp_set_sink_rates(intel_dp);
4250 intel_dp_set_common_rates(intel_dp);
4254 * Some eDP panels do not set a valid value for sink count, which is
4255 * why we don't bother reading it here or in intel_edp_init_dpcd().
4257 if (!intel_dp_is_edp(intel_dp)) {
4261 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4266 * Sink count can change between short pulse HPD interrupts,
4267 * hence a member variable in intel_dp tracks any changes
4268 * between short pulse interrupts.
4270 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4273 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4274 * a dongle is present but no display. Unless we need to know
4275 * whether a dongle is present or not, we don't need to update
4276 * downstream port information. So an early return here saves
4277 * time by skipping operations that are not required.
4279 if (!intel_dp->sink_count)
4283 if (!drm_dp_is_branch(intel_dp->dpcd))
4284 return true; /* native DP sink */
4286 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4287 return true; /* no per-port downstream info */
4289 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4290 intel_dp->downstream_ports,
4291 DP_MAX_DOWNSTREAM_PORTS) < 0)
4292 return false; /* downstream port status fetch failed */
4298 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4302 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4305 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4308 return mstm_cap & DP_MST_CAP;
4312 intel_dp_can_mst(struct intel_dp *intel_dp)
4314 return i915_modparams.enable_dp_mst &&
4315 intel_dp->can_mst &&
4316 intel_dp_sink_can_mst(intel_dp);
4320 intel_dp_configure_mst(struct intel_dp *intel_dp)
4322 struct intel_encoder *encoder =
4323 &dp_to_dig_port(intel_dp)->base;
4324 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4326 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4327 port_name(encoder->port), yesno(intel_dp->can_mst),
4328 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4330 if (!intel_dp->can_mst)
4333 intel_dp->is_mst = sink_can_mst &&
4334 i915_modparams.enable_dp_mst;
4336 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4341 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4343 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4344 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4348 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4349 int mode_clock, int mode_hdisplay)
4351 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4355 * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
4356 * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP
4357 * FECOverhead = 2.4%; for SST -> TimeSlotsPerMTP is 1,
4358 * for MST -> TimeSlotsPerMTP has to be calculated.
4360 bits_per_pixel = (link_clock * lane_count * 8 *
4361 DP_DSC_FEC_OVERHEAD_FACTOR) /
4364 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4365 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4369 * Greatest allowed DSC BPP = MIN(output BPP from available link BW
4370 * check, output BPP from small joiner RAM check)
4372 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
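/*
 * Worked example (illustrative numbers, assuming the elided divisors
 * follow the comments above): 4 lanes of HBR2 (link_clock = 540000)
 * give roughly 4 * 540000 * 8 * 0.976 = ~16.9 Gbit/s of post-FEC
 * bandwidth, i.e. ~31 bpp for a 533250 kHz 4k mode, while the small
 * joiner RAM allows only 61440 / 3840 = 16 bpp; the min() above then
 * picks 16.
 */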
4374 /* Error out if the max bpp is less than smallest allowed valid bpp */
4375 if (bits_per_pixel < valid_dsc_bpp[0]) {
4376 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4380 /* Find the nearest match in the array of known BPPs from VESA */
4381 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4382 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4385 bits_per_pixel = valid_dsc_bpp[i];
4388 * Compressed BPP is in U6.4 format, so multiply by 16; on Gen 11
4389 * the fractional part is 0.
4391 return bits_per_pixel << 4;
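/*
 * E.g. a computed 12 bpp is returned as 12 << 4 = 192 (0xC0 in U6.4,
 * with a zero fractional nibble).
 */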
4394 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4398 u8 min_slice_count, i;
4399 int max_slice_width;
4401 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4402 min_slice_count = DIV_ROUND_UP(mode_clock,
4403 DP_DSC_MAX_ENC_THROUGHPUT_0);
4405 min_slice_count = DIV_ROUND_UP(mode_clock,
4406 DP_DSC_MAX_ENC_THROUGHPUT_1);
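/*
 * Worked example (illustrative): a 4k60 mode_clock of ~533250 KPixels/s
 * is below DP_DSC_PEAK_PIXEL_RATE, so DIV_ROUND_UP(533250, 340000) = 2
 * slices minimum, before the slice width and sink limits below.
 */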
4408 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4409 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4410 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4414 /* Also take into account max slice width */
4415 min_slice_count = min_t(u8, min_slice_count,
4416 DIV_ROUND_UP(mode_hdisplay,
4419 /* Find the closest match to the valid slice count values */
4420 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4421 if (valid_dsc_slicecount[i] >
4422 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4425 if (min_slice_count <= valid_dsc_slicecount[i])
4426 return valid_dsc_slicecount[i];
4429 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4434 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4435 const struct intel_crtc_state *crtc_state)
4437 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4438 struct dp_sdp vsc_sdp = {};
4440 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4441 vsc_sdp.sdp_header.HB0 = 0;
4442 vsc_sdp.sdp_header.HB1 = 0x7;
4445 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4446 * Colorimetry Format indication.
4448 vsc_sdp.sdp_header.HB2 = 0x5;
4451 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
4452 * Colorimetry Format indication (HB2 = 05h).
4454 vsc_sdp.sdp_header.HB3 = 0x13;
4457 * YCbCr 4:2:0 = 3h in DB16[7:4]; ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
4458 * in DB16[3:0]. See DP 1.4a spec, Table 2-120.
4460 vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4, YCbCr 4:2:0 */
4461 /* RGB->YCBCR color conversion uses the BT.709 color space. */
4462 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
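/*
 * With both fields set above, DB16 ends up as 0x31: YCbCr 4:2:0 in
 * bits [7:4] and BT.709 in bits [3:0].
 */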
4465 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4466 * the following Component Bit Depth values are defined:
4472 switch (crtc_state->pipe_bpp) {
4474 vsc_sdp.db[17] = 0x1;
4476 case 30: /* 10bpc */
4477 vsc_sdp.db[17] = 0x2;
4479 case 36: /* 12bpc */
4480 vsc_sdp.db[17] = 0x3;
4482 case 48: /* 16bpc */
4483 vsc_sdp.db[17] = 0x4;
4486 MISSING_CASE(crtc_state->pipe_bpp);
4491 * Dynamic Range (Bit 7)
4492 * 0 = VESA range, 1 = CTA range.
4493 * All YCbCr formats are always limited range.
4495 vsc_sdp.db[17] |= 0x80;
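/*
 * E.g. for a 10bpc (pipe_bpp == 30) YCbCr output, DB17 ends up as
 * 0x2 | 0x80 = 0x82.
 */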
4498 * Content Type (Bits 2:0)
4499 * 000b = Not defined.
4504 * All other values are RESERVED.
4505 * Note: See CTA-861-G for the definition and expected
4506 * processing by a stream sink for the above content types.
4510 intel_dig_port->write_infoframe(&intel_dig_port->base,
4511 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4514 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4515 const struct intel_crtc_state *crtc_state)
4517 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4520 intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4523 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4527 u8 test_lane_count, test_link_bw;
4531 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4532 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4536 DRM_DEBUG_KMS("Lane count read failed\n");
4539 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4541 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4544 DRM_DEBUG_KMS("Link Rate read failed\n");
4547 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
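/*
 * drm_dp_bw_code_to_link_rate() scales the 0.27 Gbps/lane bandwidth
 * code, so e.g. a TEST_LINK_RATE byte of 0x14 becomes 0x14 * 27000 =
 * 540000 (HBR2).
 */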
4549 /* Validate the requested link rate and lane count */
4550 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4554 intel_dp->compliance.test_lane_count = test_lane_count;
4555 intel_dp->compliance.test_link_rate = test_link_rate;
4560 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4564 __be16 h_width, v_height;
4567 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4568 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4571 DRM_DEBUG_KMS("Test pattern read failed\n");
4574 if (test_pattern != DP_COLOR_RAMP)
4577 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4580 DRM_DEBUG_KMS("H Width read failed\n");
4584 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4587 DRM_DEBUG_KMS("V Height read failed\n");
4591 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4594 DRM_DEBUG_KMS("TEST MISC read failed\n");
4597 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4599 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4601 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4602 case DP_TEST_BIT_DEPTH_6:
4603 intel_dp->compliance.test_data.bpc = 6;
4605 case DP_TEST_BIT_DEPTH_8:
4606 intel_dp->compliance.test_data.bpc = 8;
4612 intel_dp->compliance.test_data.video_pattern = test_pattern;
4613 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4614 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
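/*
 * The test dimensions are transferred MSB first, hence the
 * be16_to_cpu() above: e.g. DPCD bytes 0x07, 0x80 decode to 1920.
 */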
4615 /* Set test active flag here so userspace doesn't interrupt things */
4616 intel_dp->compliance.test_active = 1;
4621 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4623 u8 test_result = DP_TEST_ACK;
4624 struct intel_connector *intel_connector = intel_dp->attached_connector;
4625 struct drm_connector *connector = &intel_connector->base;
4627 if (intel_connector->detect_edid == NULL ||
4628 connector->edid_corrupt ||
4629 intel_dp->aux.i2c_defer_count > 6) {
4630 /* Check EDID read for NACKs, DEFERs and corruption
4631 * (DP CTS 1.2 Core r1.1)
4632 * 4.2.2.4 : Failed EDID read, I2C_NAK
4633 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4634 * 4.2.2.6 : EDID corruption detected
4635 * Use failsafe mode for all cases
4637 if (intel_dp->aux.i2c_nack_count > 0 ||
4638 intel_dp->aux.i2c_defer_count > 0)
4639 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4640 intel_dp->aux.i2c_nack_count,
4641 intel_dp->aux.i2c_defer_count);
4642 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4644 struct edid *block = intel_connector->detect_edid;
4646 /* We have to write the checksum
4647 * of the last block read
4649 block += intel_connector->detect_edid->extensions;
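/*
 * struct edid is one 128-byte block, so the pointer arithmetic above
 * advances in whole blocks: with one extension, "block" now points at
 * the extension block, whose final byte is its checksum.
 */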
4651 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4652 block->checksum) <= 0)
4653 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4655 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4656 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4659 /* Set test active flag here so userspace doesn't interrupt things */
4660 intel_dp->compliance.test_active = 1;
4665 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4667 u8 test_result = DP_TEST_NAK;
4671 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4673 u8 response = DP_TEST_NAK;
4677 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4679 DRM_DEBUG_KMS("Could not read test request from sink\n");
4684 case DP_TEST_LINK_TRAINING:
4685 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4686 response = intel_dp_autotest_link_training(intel_dp);
4688 case DP_TEST_LINK_VIDEO_PATTERN:
4689 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4690 response = intel_dp_autotest_video_pattern(intel_dp);
4692 case DP_TEST_LINK_EDID_READ:
4693 DRM_DEBUG_KMS("EDID test requested\n");
4694 response = intel_dp_autotest_edid(intel_dp);
4696 case DP_TEST_LINK_PHY_TEST_PATTERN:
4697 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4698 response = intel_dp_autotest_phy_pattern(intel_dp);
4701 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4705 if (response & DP_TEST_ACK)
4706 intel_dp->compliance.test_type = request;
4709 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4711 DRM_DEBUG_KMS("Could not write test response to sink\n");
4715 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4719 if (intel_dp->is_mst) {
4720 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4725 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4726 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4730 /* check link status - the ESI block is read starting at
 * DP_SINK_COUNT_ESI (0x2002), so esi[10] is DPCD 0x200c
 * (LANE0_1_STATUS_ESI) */
4731 if (intel_dp->active_mst_links > 0 &&
4732 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4733 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4734 intel_dp_start_link_train(intel_dp);
4735 intel_dp_stop_link_train(intel_dp);
4738 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4739 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4742 for (retry = 0; retry < 3; retry++) {
4744 wret = drm_dp_dpcd_write(&intel_dp->aux,
4745 DP_SINK_COUNT_ESI+1,
4752 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4754 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4762 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4763 intel_dp->is_mst = false;
4764 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4772 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4774 u8 link_status[DP_LINK_STATUS_SIZE];
4776 if (!intel_dp->link_trained)
4780 * While PSR source HW is enabled it controls the main link, sending
4781 * frames and enabling/disabling the link, so trying to retrain can fail:
4782 * the link may not be on, or training patterns could get mixed with
4783 * frame data at the same time, causing the retrain to fail.
4784 * Also, when exiting PSR, the HW retrains the link anyway, fixing
4785 * any link status error.
4787 if (intel_psr_enabled(intel_dp))
4790 if (!intel_dp_get_link_status(intel_dp, link_status))
4794 * Validate the cached values of intel_dp->link_rate and
4795 * intel_dp->lane_count before attempting to retrain.
4797 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4798 intel_dp->lane_count))
4801 /* Retrain if Channel EQ or CR not ok */
4802 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4805 int intel_dp_retrain_link(struct intel_encoder *encoder,
4806 struct drm_modeset_acquire_ctx *ctx)
4808 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4809 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4810 struct intel_connector *connector = intel_dp->attached_connector;
4811 struct drm_connector_state *conn_state;
4812 struct intel_crtc_state *crtc_state;
4813 struct intel_crtc *crtc;
4816 /* FIXME handle the MST connectors as well */
4818 if (!connector || connector->base.status != connector_status_connected)
4821 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4826 conn_state = connector->base.state;
4828 crtc = to_intel_crtc(conn_state->crtc);
4832 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4836 crtc_state = to_intel_crtc_state(crtc->base.state);
4838 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4840 if (!crtc_state->base.active)
4843 if (conn_state->commit &&
4844 !try_wait_for_completion(&conn_state->commit->hw_done))
4847 if (!intel_dp_needs_link_retrain(intel_dp))
4850 /* Suppress underruns caused by re-training */
4851 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4852 if (crtc_state->has_pch_encoder)
4853 intel_set_pch_fifo_underrun_reporting(dev_priv,
4854 intel_crtc_pch_transcoder(crtc), false);
4856 intel_dp_start_link_train(intel_dp);
4857 intel_dp_stop_link_train(intel_dp);
4859 /* Keep underrun reporting disabled until things are stable */
4860 intel_wait_for_vblank(dev_priv, crtc->pipe);
4862 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4863 if (crtc_state->has_pch_encoder)
4864 intel_set_pch_fifo_underrun_reporting(dev_priv,
4865 intel_crtc_pch_transcoder(crtc), true);
4871 * If the display is now connected, check link status;
4872 * there have been known issues of link loss triggering a long pulse.
4875 * Some sinks (e.g. ASUS PB287Q) seem to perform some
4876 * weird HPD ping pong during modesets. So we can apparently
4877 * end up with HPD going low during a modeset, and then
4878 * going back up soon after. And once that happens we must
4879 * retrain the link to get a picture. That covers the case where
4880 * no userspace component reacted to the intermittent HPD dip.
4882 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4883 struct intel_connector *connector)
4885 struct drm_modeset_acquire_ctx ctx;
4889 changed = intel_encoder_hotplug(encoder, connector);
4891 drm_modeset_acquire_init(&ctx, 0);
4894 ret = intel_dp_retrain_link(encoder, &ctx);
4896 if (ret == -EDEADLK) {
4897 drm_modeset_backoff(&ctx);
4904 drm_modeset_drop_locks(&ctx);
4905 drm_modeset_acquire_fini(&ctx);
4906 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4911 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4915 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4918 if (drm_dp_dpcd_readb(&intel_dp->aux,
4919 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4922 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4924 if (val & DP_AUTOMATED_TEST_REQUEST)
4925 intel_dp_handle_test_request(intel_dp);
4927 if (val & DP_CP_IRQ)
4928 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4930 if (val & DP_SINK_SPECIFIC_IRQ)
4931 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4935 * According to DP spec
4936 * 5.1.2:
4937 * 1. Read DPCD
4938 * 2. Configure link according to Receiver Capabilities
4939 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4940 * 4. Check link status on receipt of hot-plug interrupt
4942 * intel_dp_short_pulse - handles short pulse interrupts
4943 * when full detection is not required.
4944 * Returns %true if the short pulse was handled and full detection
4945 * is NOT required, %false otherwise.
4948 intel_dp_short_pulse(struct intel_dp *intel_dp)
4950 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4951 u8 old_sink_count = intel_dp->sink_count;
4955 * Clear the compliance test variables so that values can be
4956 * captured for the next automated test request.
4958 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4961 * Now read the DPCD to see if it's actually running.
4962 * If the current value of sink count doesn't match the
4963 * value that was stored earlier, or the DPCD read failed,
4964 * we need to do full detection.
4966 ret = intel_dp_get_dpcd(intel_dp);
4968 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4969 /* No need to proceed if we are going to do full detect */
4973 intel_dp_check_service_irq(intel_dp);
4975 /* Handle CEC interrupts, if any */
4976 drm_dp_cec_irq(&intel_dp->aux);
4978 /* defer to the hotplug work for link retraining if needed */
4979 if (intel_dp_needs_link_retrain(intel_dp))
4982 intel_psr_short_pulse(intel_dp);
4984 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4985 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4986 /* Send a Hotplug Uevent to userspace to start modeset */
4987 drm_kms_helper_hotplug_event(&dev_priv->drm);
4993 /* XXX this is probably wrong for multiple downstream ports */
4994 static enum drm_connector_status
4995 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4997 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4998 u8 *dpcd = intel_dp->dpcd;
5001 if (WARN_ON(intel_dp_is_edp(intel_dp)))
5002 return connector_status_connected;
5005 lspcon_resume(lspcon);
5007 if (!intel_dp_get_dpcd(intel_dp))
5008 return connector_status_disconnected;
5010 /* if there's no downstream port, we're done */
5011 if (!drm_dp_is_branch(dpcd))
5012 return connector_status_connected;
5014 /* If we're HPD-aware, SINK_COUNT changes dynamically */
5015 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5016 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5018 return intel_dp->sink_count ?
5019 connector_status_connected : connector_status_disconnected;
5022 if (intel_dp_can_mst(intel_dp))
5023 return connector_status_connected;
5025 /* If no HPD, poke DDC gently */
5026 if (drm_probe_ddc(&intel_dp->aux.ddc))
5027 return connector_status_connected;
5029 /* Well we tried, say unknown for unreliable port types */
5030 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5031 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5032 if (type == DP_DS_PORT_TYPE_VGA ||
5033 type == DP_DS_PORT_TYPE_NON_EDID)
5034 return connector_status_unknown;
5036 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5037 DP_DWN_STRM_PORT_TYPE_MASK;
5038 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5039 type == DP_DWN_STRM_PORT_TYPE_OTHER)
5040 return connector_status_unknown;
5043 /* Anything else is out of spec, warn and ignore */
5044 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5045 return connector_status_disconnected;
5048 static enum drm_connector_status
5049 edp_detect(struct intel_dp *intel_dp)
5051 return connector_status_connected;
5054 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5056 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5059 switch (encoder->hpd_pin) {
5061 bit = SDE_PORTB_HOTPLUG;
5064 bit = SDE_PORTC_HOTPLUG;
5067 bit = SDE_PORTD_HOTPLUG;
5070 MISSING_CASE(encoder->hpd_pin);
5074 return I915_READ(SDEISR) & bit;
5077 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5079 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5082 switch (encoder->hpd_pin) {
5084 bit = SDE_PORTB_HOTPLUG_CPT;
5087 bit = SDE_PORTC_HOTPLUG_CPT;
5090 bit = SDE_PORTD_HOTPLUG_CPT;
5093 MISSING_CASE(encoder->hpd_pin);
5097 return I915_READ(SDEISR) & bit;
5100 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5102 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5105 switch (encoder->hpd_pin) {
5107 bit = SDE_PORTA_HOTPLUG_SPT;
5110 bit = SDE_PORTE_HOTPLUG_SPT;
5113 return cpt_digital_port_connected(encoder);
5116 return I915_READ(SDEISR) & bit;
5119 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5121 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5124 switch (encoder->hpd_pin) {
5126 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5129 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5132 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5135 MISSING_CASE(encoder->hpd_pin);
5139 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5142 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5144 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5147 switch (encoder->hpd_pin) {
5149 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5152 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5155 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5158 MISSING_CASE(encoder->hpd_pin);
5162 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5165 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5167 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5169 if (encoder->hpd_pin == HPD_PORT_A)
5170 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5172 return ibx_digital_port_connected(encoder);
5175 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5177 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5179 if (encoder->hpd_pin == HPD_PORT_A)
5180 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5182 return cpt_digital_port_connected(encoder);
5185 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5187 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5189 if (encoder->hpd_pin == HPD_PORT_A)
5190 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5192 return cpt_digital_port_connected(encoder);
5195 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5197 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5199 if (encoder->hpd_pin == HPD_PORT_A)
5200 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5202 return cpt_digital_port_connected(encoder);
5205 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5207 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5210 switch (encoder->hpd_pin) {
5212 bit = BXT_DE_PORT_HP_DDIA;
5215 bit = BXT_DE_PORT_HP_DDIB;
5218 bit = BXT_DE_PORT_HP_DDIC;
5221 MISSING_CASE(encoder->hpd_pin);
5225 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5228 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5229 struct intel_digital_port *intel_dig_port)
5231 enum port port = intel_dig_port->base.port;
5233 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5236 static const char *tc_type_name(enum tc_port_type type)
5238 static const char * const names[] = {
5239 [TC_PORT_UNKNOWN] = "unknown",
5240 [TC_PORT_LEGACY] = "legacy",
5241 [TC_PORT_TYPEC] = "typec",
5242 [TC_PORT_TBT] = "tbt",
5245 if (WARN_ON(type >= ARRAY_SIZE(names)))
5246 type = TC_PORT_UNKNOWN;
5251 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5252 struct intel_digital_port *intel_dig_port,
5253 bool is_legacy, bool is_typec, bool is_tbt)
5255 enum port port = intel_dig_port->base.port;
5256 enum tc_port_type old_type = intel_dig_port->tc_type;
5258 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5261 intel_dig_port->tc_type = TC_PORT_LEGACY;
5263 intel_dig_port->tc_type = TC_PORT_TYPEC;
5265 intel_dig_port->tc_type = TC_PORT_TBT;
5269 /* Types are not supposed to be changed at runtime. */
5270 WARN_ON(old_type != TC_PORT_UNKNOWN &&
5271 old_type != intel_dig_port->tc_type);
5273 if (old_type != intel_dig_port->tc_type)
5274 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5275 tc_type_name(intel_dig_port->tc_type));
5279 * This function implements the first part of the Connect Flow described by our
5280 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5281 * lanes, EDID, etc) is done as needed in the typical places.
5283 * Unlike the other ports, type-C ports are not available to use as soon as we
5284 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5285 * display, USB, etc. As a result, handshaking through FIA is required around
5286 * connect and disconnect to cleanly transfer ownership with the controller and
5287 * set the type-C power state.
5289 * We could opt to only do the connect flow when we actually try to use the AUX
5290 * channels or do a modeset, then immediately run the disconnect flow after
5291 * usage, but there are some implications on this for a dynamic environment:
5292 * things may go away or change behind our backs. So for now our driver is
5293 * always trying to acquire ownership of the controller as soon as it gets an
5294 * interrupt (or polls state and sees a port is connected) and only gives it
5295 * back when it sees a disconnect. Implementation of a more fine-grained model
5296 * will require a lot of coordination with user space and thorough testing for
5297 * the extra possible cases.
5299 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5300 struct intel_digital_port *dig_port)
5302 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5305 if (dig_port->tc_type != TC_PORT_LEGACY &&
5306 dig_port->tc_type != TC_PORT_TYPEC)
5309 val = I915_READ(PORT_TX_DFLEXDPPMS);
5310 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5311 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5312 WARN_ON(dig_port->tc_legacy_port);
5317 * This function may be called many times in a row without an HPD event
5318 * in between, so try to avoid the write when we can.
5320 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5321 if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5322 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5323 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5327 * Now we have to re-check the live state, in case the port recently
5328 * became disconnected. Not necessary for legacy mode.
5330 if (dig_port->tc_type == TC_PORT_TYPEC &&
5331 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5332 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5333 icl_tc_phy_disconnect(dev_priv, dig_port);
5341 * See the comment at the connect function. This implements the Disconnect
5344 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5345 struct intel_digital_port *dig_port)
5347 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5349 if (dig_port->tc_type == TC_PORT_UNKNOWN)
5353 * The TBT disconnection flow is just reading the live status, which
5354 * was already done by the caller.
5356 if (dig_port->tc_type == TC_PORT_TYPEC ||
5357 dig_port->tc_type == TC_PORT_LEGACY) {
5360 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5361 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5362 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5365 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5366 port_name(dig_port->base.port),
5367 tc_type_name(dig_port->tc_type));
5369 dig_port->tc_type = TC_PORT_UNKNOWN;
5373 * The type-C ports are different because even when they are connected, they may
5374 * not be available/usable by the graphics driver: see the comment on
5375 * icl_tc_phy_connect(). So in our driver, instead of adding the additional
5376 * concept of "usable" and making everything check for "connected and
5377 * usable", we define a port as "connected" when it is not only connected,
5378 * but also usable by the rest of the driver. That maintains the old
5379 * assumption that connected ports are usable, and avoids exposing to
5380 * users objects they can't use.
5382 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5383 struct intel_digital_port *intel_dig_port)
5385 enum port port = intel_dig_port->base.port;
5386 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5387 bool is_legacy, is_typec, is_tbt;
5391 * Complain if we got a legacy port HPD, but VBT didn't mark the port as
5392 * legacy. Treat the port as legacy from now on.
5394 if (!intel_dig_port->tc_legacy_port &&
5395 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
5396 DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
5398 intel_dig_port->tc_legacy_port = true;
5400 is_legacy = intel_dig_port->tc_legacy_port;
5403 * The spec says we shouldn't be using the ISR bits for detecting
5404 * between TC and TBT. We should use DFLEXDPSP.
5406 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5407 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5408 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5410 if (!is_legacy && !is_typec && !is_tbt) {
5411 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5416 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5419 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5425 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5427 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5428 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5430 if (intel_port_is_combophy(dev_priv, encoder->port))
5431 return icl_combo_port_connected(dev_priv, dig_port);
5432 else if (intel_port_is_tc(dev_priv, encoder->port))
5433 return icl_tc_port_connected(dev_priv, dig_port);
5435 MISSING_CASE(encoder->hpd_pin);
5441 * intel_digital_port_connected - is the specified port connected?
5442 * @encoder: intel_encoder
5444 * In cases where there's a connector physically connected but it can't be used
5445 * by our hardware we also return false, since the rest of the driver should
5446 * pretty much treat the port as disconnected. This is relevant for type-C
5447 * (starting on ICL) where there's ownership involved.
5449 * Return %true if port is connected, %false otherwise.
5451 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5453 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5455 if (HAS_GMCH(dev_priv)) {
5456 if (IS_GM45(dev_priv))
5457 return gm45_digital_port_connected(encoder);
5459 return g4x_digital_port_connected(encoder);
5462 if (INTEL_GEN(dev_priv) >= 11)
5463 return icl_digital_port_connected(encoder);
5464 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5465 return spt_digital_port_connected(encoder);
5466 else if (IS_GEN9_LP(dev_priv))
5467 return bxt_digital_port_connected(encoder);
5468 else if (IS_GEN(dev_priv, 8))
5469 return bdw_digital_port_connected(encoder);
5470 else if (IS_GEN(dev_priv, 7))
5471 return ivb_digital_port_connected(encoder);
5472 else if (IS_GEN(dev_priv, 6))
5473 return snb_digital_port_connected(encoder);
5474 else if (IS_GEN(dev_priv, 5))
5475 return ilk_digital_port_connected(encoder);
5477 MISSING_CASE(INTEL_GEN(dev_priv));
5481 bool intel_digital_port_connected(struct intel_encoder *encoder)
5483 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5484 bool is_connected = false;
5485 intel_wakeref_t wakeref;
5487 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5488 is_connected = __intel_digital_port_connected(encoder);
5490 return is_connected;
5493 static struct edid *
5494 intel_dp_get_edid(struct intel_dp *intel_dp)
5496 struct intel_connector *intel_connector = intel_dp->attached_connector;
5498 /* use cached edid if we have one */
5499 if (intel_connector->edid) {
5501 if (IS_ERR(intel_connector->edid))
5504 return drm_edid_duplicate(intel_connector->edid);
5506 return drm_get_edid(&intel_connector->base,
5507 &intel_dp->aux.ddc);
5511 intel_dp_set_edid(struct intel_dp *intel_dp)
5513 struct intel_connector *intel_connector = intel_dp->attached_connector;
5516 intel_dp_unset_edid(intel_dp);
5517 edid = intel_dp_get_edid(intel_dp);
5518 intel_connector->detect_edid = edid;
5520 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5521 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5525 intel_dp_unset_edid(struct intel_dp *intel_dp)
5527 struct intel_connector *intel_connector = intel_dp->attached_connector;
5529 drm_dp_cec_unset_edid(&intel_dp->aux);
5530 kfree(intel_connector->detect_edid);
5531 intel_connector->detect_edid = NULL;
5533 intel_dp->has_audio = false;
5537 intel_dp_detect(struct drm_connector *connector,
5538 struct drm_modeset_acquire_ctx *ctx,
5541 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5542 struct intel_dp *intel_dp = intel_attached_dp(connector);
5543 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5544 struct intel_encoder *encoder = &dig_port->base;
5545 enum drm_connector_status status;
5547 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5548 connector->base.id, connector->name);
5549 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5551 /* Can't disconnect eDP */
5552 if (intel_dp_is_edp(intel_dp))
5553 status = edp_detect(intel_dp);
5554 else if (intel_digital_port_connected(encoder))
5555 status = intel_dp_detect_dpcd(intel_dp);
5557 status = connector_status_disconnected;
5559 if (status == connector_status_disconnected) {
5560 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5561 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5563 if (intel_dp->is_mst) {
5564 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5566 intel_dp->mst_mgr.mst_state);
5567 intel_dp->is_mst = false;
5568 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5575 if (intel_dp->reset_link_params) {
5576 /* Initial max link lane count */
5577 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5579 /* Initial max link rate */
5580 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5582 intel_dp->reset_link_params = false;
5585 intel_dp_print_rates(intel_dp);
5587 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5588 if (INTEL_GEN(dev_priv) >= 11)
5589 intel_dp_get_dsc_sink_cap(intel_dp);
5591 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5592 drm_dp_is_branch(intel_dp->dpcd));
5594 intel_dp_configure_mst(intel_dp);
5596 if (intel_dp->is_mst) {
5598 * If we are in MST mode then this connector
5599 * won't appear connected or have anything
5600 * with EDID on it.
5602 status = connector_status_disconnected;
5607 * Some external monitors do not signal loss of link synchronization
5608 * with an IRQ_HPD, so force a link status check.
5610 if (!intel_dp_is_edp(intel_dp)) {
5613 ret = intel_dp_retrain_link(encoder, ctx);
5619 * Clear the NACK and DEFER counts to get their exact values
5620 * while reading the EDID, as required by Compliance tests
5621 * 4.2.2.4 and 4.2.2.5.
5623 intel_dp->aux.i2c_nack_count = 0;
5624 intel_dp->aux.i2c_defer_count = 0;
5626 intel_dp_set_edid(intel_dp);
5627 if (intel_dp_is_edp(intel_dp) ||
5628 to_intel_connector(connector)->detect_edid)
5629 status = connector_status_connected;
5631 intel_dp_check_service_irq(intel_dp);
5634 if (status != connector_status_connected && !intel_dp->is_mst)
5635 intel_dp_unset_edid(intel_dp);
5641 intel_dp_force(struct drm_connector *connector)
5643 struct intel_dp *intel_dp = intel_attached_dp(connector);
5644 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5645 struct intel_encoder *intel_encoder = &dig_port->base;
5646 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5647 enum intel_display_power_domain aux_domain =
5648 intel_aux_power_domain(dig_port);
5649 intel_wakeref_t wakeref;
5651 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5652 connector->base.id, connector->name);
5653 intel_dp_unset_edid(intel_dp);
5655 if (connector->status != connector_status_connected)
5658 wakeref = intel_display_power_get(dev_priv, aux_domain);
5660 intel_dp_set_edid(intel_dp);
5662 intel_display_power_put(dev_priv, aux_domain, wakeref);
5665 static int intel_dp_get_modes(struct drm_connector *connector)
5667 struct intel_connector *intel_connector = to_intel_connector(connector);
5670 edid = intel_connector->detect_edid;
5672 int ret = intel_connector_update_modes(connector, edid);
5677 /* if eDP has no EDID, fall back to fixed mode */
5678 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5679 intel_connector->panel.fixed_mode) {
5680 struct drm_display_mode *mode;
5682 mode = drm_mode_duplicate(connector->dev,
5683 intel_connector->panel.fixed_mode);
5685 drm_mode_probed_add(connector, mode);
5694 intel_dp_connector_register(struct drm_connector *connector)
5696 struct intel_dp *intel_dp = intel_attached_dp(connector);
5697 struct drm_device *dev = connector->dev;
5700 ret = intel_connector_register(connector);
5704 i915_debugfs_connector_add(connector);
5706 DRM_DEBUG_KMS("registering %s bus for %s\n",
5707 intel_dp->aux.name, connector->kdev->kobj.name);
5709 intel_dp->aux.dev = connector->kdev;
5710 ret = drm_dp_aux_register(&intel_dp->aux);
5712 drm_dp_cec_register_connector(&intel_dp->aux,
5713 connector->name, dev->dev);
5718 intel_dp_connector_unregister(struct drm_connector *connector)
5720 struct intel_dp *intel_dp = intel_attached_dp(connector);
5722 drm_dp_cec_unregister_connector(&intel_dp->aux);
5723 drm_dp_aux_unregister(&intel_dp->aux);
5724 intel_connector_unregister(connector);
5727 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5729 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5730 struct intel_dp *intel_dp = &intel_dig_port->dp;
5732 intel_dp_mst_encoder_cleanup(intel_dig_port);
5733 if (intel_dp_is_edp(intel_dp)) {
5734 intel_wakeref_t wakeref;
5736 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5738 * vdd might still be enabled due to the delayed vdd off.
5739 * Make sure vdd is actually turned off here.
5741 with_pps_lock(intel_dp, wakeref)
5742 edp_panel_vdd_off_sync(intel_dp);
5744 if (intel_dp->edp_notifier.notifier_call) {
5745 unregister_reboot_notifier(&intel_dp->edp_notifier);
5746 intel_dp->edp_notifier.notifier_call = NULL;
5750 intel_dp_aux_fini(intel_dp);
5753 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5755 intel_dp_encoder_flush_work(encoder);
5757 drm_encoder_cleanup(encoder);
5758 kfree(enc_to_dig_port(encoder));
5761 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5763 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5764 intel_wakeref_t wakeref;
5766 if (!intel_dp_is_edp(intel_dp))
5770 * vdd might still be enabled due to the delayed vdd off.
5771 * Make sure vdd is actually turned off here.
5773 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5774 with_pps_lock(intel_dp, wakeref)
5775 edp_panel_vdd_off_sync(intel_dp);
5778 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5782 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5783 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5784 msecs_to_jiffies(timeout));
5787 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5791 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5794 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5795 static const struct drm_dp_aux_msg msg = {
5796 .request = DP_AUX_NATIVE_WRITE,
5797 .address = DP_AUX_HDCP_AKSV,
5798 .size = DRM_HDCP_KSV_LEN,
5800 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5804 /* Output An first, that's easy */
5805 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5806 an, DRM_HDCP_AN_LEN);
5807 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5808 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5810 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5814 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5815 * order to get it on the wire, we need to create the AUX header as if
5816 * we were writing the data, and then tickle the hardware to output the
5817 * data once the header is sent out.
5819 intel_dp_aux_header(txbuf, &msg);
5821 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5822 rxbuf, sizeof(rxbuf),
5823 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5825 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5827 } else if (ret == 0) {
5828 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5832 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5833 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5834 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5841 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5845 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5847 if (ret != DRM_HDCP_KSV_LEN) {
5848 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5849 return ret >= 0 ? -EIO : ret;
5854 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5859 * For some reason the HDMI and DP HDCP specs call this register
5860 * definition by different names. In the HDMI spec, it's called BSTATUS,
5861 * but in DP it's called BINFO.
5863 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5864 bstatus, DRM_HDCP_BSTATUS_LEN);
5865 if (ret != DRM_HDCP_BSTATUS_LEN) {
5866 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5867 return ret >= 0 ? -EIO : ret;
5873 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5878 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5881 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5882 return ret >= 0 ? -EIO : ret;
5889 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5890 bool *repeater_present)
5895 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5899 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5904 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5908 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5909 ri_prime, DRM_HDCP_RI_LEN);
5910 if (ret != DRM_HDCP_RI_LEN) {
5911 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5912 return ret >= 0 ? -EIO : ret;
5918 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5923 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5926 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5927 return ret >= 0 ? -EIO : ret;
5929 *ksv_ready = bstatus & DP_BSTATUS_READY;
5934 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5935 int num_downstream, u8 *ksv_fifo)
5940 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
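/*
 * E.g. with 7 downstream devices the loop below issues reads of 15,
 * 15 and 5 bytes.
 */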
5941 for (i = 0; i < num_downstream; i += 3) {
5942 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5943 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5944 DP_AUX_HDCP_KSV_FIFO,
5945 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5948 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5950 return ret >= 0 ? -EIO : ret;
5957 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5962 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5965 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5966 DP_AUX_HDCP_V_PRIME(i), part,
5967 DRM_HDCP_V_PRIME_PART_LEN);
5968 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5969 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5970 return ret >= 0 ? -EIO : ret;
5976 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5979 /* Not used for single stream DisplayPort setups */
5984 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5989 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5992 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5996 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
6000 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
6006 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
6010 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
6014 struct hdcp2_dp_errata_stream_type {
6019 static struct hdcp2_dp_msg_data {
6022 bool msg_detectable;
6024 u32 timeout2; /* Added for the non-paired situation */
6025 } hdcp2_msg_data[] = {
6026 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
6027 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
6028 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
6029 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
6031 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
6033 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
6034 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
6035 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
6036 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
6037 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
6038 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
6039 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
6040 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
6041 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
6042 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
6044 {HDCP_2_2_REP_SEND_RECVID_LIST,
6045 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
6046 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
6047 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
6049 {HDCP_2_2_REP_STREAM_MANAGE,
6050 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
6052 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
6053 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
6054 /* local define to shovel this through the write_2_2 interface */
6055 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
6056 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
6057 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
6062 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
6067 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6068 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
6069 HDCP_2_2_DP_RXSTATUS_LEN);
6070 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
6071 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6072 return ret >= 0 ? -EIO : ret;
6079 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
6080 u8 msg_id, bool *msg_ready)
6086 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6091 case HDCP_2_2_AKE_SEND_HPRIME:
6092 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6095 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6096 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6099 case HDCP_2_2_REP_SEND_RECVID_LIST:
6100 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6104 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6112 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
6113 struct hdcp2_dp_msg_data *hdcp2_msg_data)
6115 struct intel_dp *dp = &intel_dig_port->dp;
6116 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6117 u8 msg_id = hdcp2_msg_data->msg_id;
6119 bool msg_ready = false;
6121 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
6122 timeout = hdcp2_msg_data->timeout2;
6124 timeout = hdcp2_msg_data->timeout;
6127 * There is no way to detect the CERT, LPRIME and STREAM_READY
6128 * availability. So wait for the timeout and then read the msg.
6130 if (!hdcp2_msg_data->msg_detectable) {
6135 * Since we want to check msg availability at the timeout, ignore
6136 * the timeout of the CP_IRQ wait itself.
6138 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6139 ret = hdcp2_detect_msg_availability(intel_dig_port,
6140 msg_id, &msg_ready);
6146 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6147 hdcp2_msg_data->msg_id, ret, timeout);
6152 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6156 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6157 if (hdcp2_msg_data[i].msg_id == msg_id)
6158 return &hdcp2_msg_data[i];
6164 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6165 void *buf, size_t size)
6167 struct intel_dp *dp = &intel_dig_port->dp;
6168 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6169 unsigned int offset;
6171 ssize_t ret, bytes_to_write, len;
6172 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6174 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6175 if (!hdcp2_msg_data)
6178 offset = hdcp2_msg_data->offset;
6180 /* No msg_id in DP HDCP2.2 msgs */
6181 bytes_to_write = size - 1;
6184 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6186 while (bytes_to_write) {
6187 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6188 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
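/*
 * DP_AUX_MAX_PAYLOAD_BYTES is 16, so e.g. a 100-byte message body goes
 * out as six 16-byte AUX writes followed by one 4-byte write.
 */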
6190 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6191 offset, (void *)byte, len);
6195 bytes_to_write -= ret;
6204 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6206 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6210 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6211 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6212 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6213 if (ret != HDCP_2_2_RXINFO_LEN)
6214 return ret >= 0 ? -EIO : ret;
6216 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6217 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6219 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6220 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6222 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6223 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6224 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6230 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6231 u8 msg_id, void *buf, size_t size)
6233 unsigned int offset;
6235 ssize_t ret, bytes_to_recv, len;
6236 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6238 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6239 if (!hdcp2_msg_data)
6241 offset = hdcp2_msg_data->offset;
6243 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6247 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6248 ret = get_receiver_id_list_size(intel_dig_port);
6254 bytes_to_recv = size - 1;
6256 /* DP adaptation msgs have no msg_id */
6259 while (bytes_to_recv) {
6260 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6261 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6263 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6266 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6270 bytes_to_recv -= ret;
6281 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6282 bool is_repeater, u8 content_type)
6284 struct hdcp2_dp_errata_stream_type stream_type_msg;
6290 * Errata for DP: As the stream type is used for encryption, the
6291 * receiver must be told the stream type in order to decrypt the
6292 * content.
6293 * A repeater is told the stream type as a part of its
6294 * authentication later on.
6296 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6297 stream_type_msg.stream_type = content_type;
6299 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6300 sizeof(stream_type_msg));
6304 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6309 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6313 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6314 ret = HDCP_REAUTH_REQUEST;
6315 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6316 ret = HDCP_LINK_INTEGRITY_FAILURE;
6317 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6318 ret = HDCP_TOPOLOGY_CHANGE;
6324 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6331 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6332 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6333 rx_caps, HDCP_2_2_RXCAPS_LEN);
6334 if (ret != HDCP_2_2_RXCAPS_LEN)
6335 return ret >= 0 ? -EIO : ret;
6337 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6338 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6344 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6345 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6346 .read_bksv = intel_dp_hdcp_read_bksv,
6347 .read_bstatus = intel_dp_hdcp_read_bstatus,
6348 .repeater_present = intel_dp_hdcp_repeater_present,
6349 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6350 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6351 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6352 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6353 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6354 .check_link = intel_dp_hdcp_check_link,
6355 .hdcp_capable = intel_dp_hdcp_capable,
6356 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6357 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6358 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6359 .check_2_2_link = intel_dp_hdcp2_check_link,
6360 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6361 .protocol = HDCP_PROTOCOL_DP,
6364 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6366 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6367 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6369 lockdep_assert_held(&dev_priv->pps_mutex);
6371 if (!edp_have_panel_vdd(intel_dp))
6375 * The VDD bit needs a power domain reference, so if the bit is
6376 * already enabled when we boot or resume, grab this reference and
6377 * schedule a vdd off, so we don't hold on to the reference
6378 * indefinitely.
6380 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6381 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6383 edp_panel_vdd_schedule_off(intel_dp);
6386 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6388 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6389 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6392 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6393 encoder->port, &pipe))
6396 return INVALID_PIPE;

void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);

			return IRQ_NONE;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x, so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ironlake_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_ctl);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = I915_READ(regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 1300ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
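
/*
 * Worked example (illustrative): the fields above are in 100 usec units.
 * A VBT T12 of 500 ms is stored as 5000; after the zero-based-hw
 * correction (+ 100 * 10) it becomes 6000, and get_delay() converts that
 * to a 600 ms panel_power_cycle_delay. The final roundup() keeps t11_t12
 * a multiple of 1000 (100 ms), matching the hw granularity.
 */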

static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			WARN(1, "port %c out of range for panel power sequencer\n",
			     port_name(port));
			break;
		}
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		I915_WRITE(regs.pp_div,
			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = I915_READ(regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		I915_WRITE(regs.pp_ctrl, pp_ctl);
	}

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      i915_mmio_reg_valid(regs.pp_div) ?
		      I915_READ(regs.pp_div) :
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
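
/*
 * Worked example (illustrative, assuming a 24 MHz raw clock, i.e.
 * rawclk_freq == 24000): div = 24, so the reference divider field is
 * programmed as (100 * 24) / 2 - 1 = 1199, and a t11_t12 of 6000
 * (100 usec units) becomes DIV_ROUND_UP(6000, 1000) = 6 in the 100 ms
 * power cycle delay field.
 */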

static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when the refresh rate (RR) has to be changed
 * from one frequency to another. Switches can be between the high and low
 * RR supported by the panel or to any other RR based on media playback (in
 * this case, the RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->base.active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refresh rate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
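
/*
 * Illustrative sketch (not a driver entry point): per the locking rule in
 * the kernel-doc above, a caller dropping to the panel's low refresh rate
 * would look roughly like
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv, crtc_state,
 *				panel->downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 *
 * which is effectively what intel_edp_drrs_downclock_work() below does.
 */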

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		DRM_DEBUG_KMS("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes
 * starts, hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
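
/*
 * Illustrative sketch (assumed caller, not defined in this file): the
 * frontbuffer tracking code drives the transitions described above by
 * bracketing rendering roughly as
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... rendering to the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * so the 1 second idle timeout only starts once all busy bits are clear.
 */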

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of the downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector,
							   edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		WARN_ON(intel_port_is_tc(dev_priv, port));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}