/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

45 #include "i915_debugfs.h"
47 #include "intel_atomic.h"
48 #include "intel_audio.h"
49 #include "intel_connector.h"
50 #include "intel_ddi.h"
52 #include "intel_dp_link_training.h"
53 #include "intel_dp_mst.h"
54 #include "intel_dpio_phy.h"
55 #include "intel_drv.h"
56 #include "intel_fifo_underrun.h"
57 #include "intel_hdcp.h"
58 #include "intel_hdmi.h"
59 #include "intel_hotplug.h"
60 #include "intel_lspcon.h"
61 #include "intel_lvds.h"
62 #include "intel_panel.h"
63 #include "intel_psr.h"
64 #include "intel_sideband.h"
65 #include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes (2 * 640 * 6 * 8 = 61440 bits) */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976
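/* 976/1000 = 0.976, i.e. FEC consumes 2.4% of the raw link bandwidth. */
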
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * The table below provides only the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
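	/*
	 * Worked example for the first entry below:
	 * (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a.
	 */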
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = 0;
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
			     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
				DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

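/*
 * Worked example for the two helpers above: a 1920x1080@60 mode with a
 * 148500 kHz pixel clock at 24 bpp needs intel_dp_link_required() =
 * DIV_ROUND_UP(148500 * 24, 8) = 445500 kB/s, while an HBR2 x4 link
 * provides intel_dp_max_data_rate() = 540000 * 4 = 2160000 kB/s, so the
 * mode fits comfortably.
 */
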
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

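/*
 * Merge the (ascending) source and sink rate arrays two-pointer style:
 * advance whichever index points at the smaller rate, and record a rate
 * only when it appears in both arrays.
 */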
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);
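	/*
	 * mode_rate above is computed for 18 bpp (6 bpc), the lowest depth
	 * we can dither down to, so a mode is only rejected below if it
	 * does not fit even at that minimum and cannot be compressed.
	 */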

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

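/*
 * The two helpers above use a big-endian byte layout: intel_dp_pack_aux()
 * packs {0x12, 0x34, 0x56, 0x78} into 0x12345678 and intel_dp_unpack_aux()
 * performs the inverse, matching the AUX channel data register format.
 */
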
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

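/*
 * Run a statement with the PPS lock held: pps_lock() returns a nonzero
 * wakeref and pps_unlock() returns 0, so the for-loop below executes its
 * body exactly once and drops the lock (and power reference) on exit.
 */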
#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

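/*
 * Example of the header layout above: a native AUX read
 * (DP_AUX_NATIVE_READ = 0x9) of DPCD address 0x00202 with msg->size == 2
 * yields { 0x90, 0x02, 0x02, 0x01 }, i.e. request/addr[19:16],
 * addr[15:8], addr[7:0], size - 1.
 */
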
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

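/*
 * Example: for port_clock == 270000 the fallback path above programs
 * link_bw 0x0a (drm_dp_link_rate_to_bw_code() divides by 27000), i.e.
 * DP_LINK_BW_2_7, with rate_select 0; eDP 1.4 rate-select panels instead
 * get link_bw 0 and an index into sink_rates.
 */
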
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3 * bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value was computed assuming RGB output; for the
	 * YCbCr 4:2:0 output format the number of bytes per pixel is
	 * half that of an RGB pixel.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

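/*
 * Example: a 24 bpp RGB configuration becomes an effective 12 bpp for
 * YCbCr 4:2:0 output, since chroma subsampling halves the average number
 * of bits per pixel carried on the link.
 */
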
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}

int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}

	return 0;
}

static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	int ret;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

2167 intel_dp_compute_config(struct intel_encoder *encoder,
2168 struct intel_crtc_state *pipe_config,
2169 struct drm_connector_state *conn_state)
2171 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2172 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2173 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2174 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2175 enum port port = encoder->port;
2176 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2177 struct intel_connector *intel_connector = intel_dp->attached_connector;
2178 struct intel_digital_connector_state *intel_conn_state =
2179 to_intel_digital_connector_state(conn_state);
2180 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2181 DP_DPCD_QUIRK_CONSTANT_N);
2182 int ret = 0, output_bpp;
2184 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2185 pipe_config->has_pch_encoder = true;
2187 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2189 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
				       pipe_config);
	if (ret)
		return ret;
2197 pipe_config->has_drrs = false;
2198 if (IS_G4X(dev_priv) || port == PORT_A)
2199 pipe_config->has_audio = false;
2200 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2201 pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2205 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
2209 if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}
2215 if (HAS_GMCH(dev_priv))
2216 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2217 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;
	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;
2237 pipe_config->limited_color_range =
2238 intel_dp_limited_color_range(pipe_config, conn_state);
2240 if (pipe_config->dsc_params.compression_enable)
2241 output_bpp = pipe_config->dsc_params.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2245 intel_link_compute_m_n(output_bpp,
2246 pipe_config->lane_count,
2247 adjusted_mode->crtc_clock,
2248 pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n);
2252 if (intel_connector->panel.downclock_mode != NULL &&
2253 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2254 pipe_config->has_drrs = true;
2255 intel_link_compute_m_n(output_bpp,
2256 pipe_config->lane_count,
2257 intel_connector->panel.downclock_mode->clock,
2258 pipe_config->port_clock,
				       &pipe_config->dp_m2_n2,
				       constant_n);
2263 if (!HAS_DDI(dev_priv))
2264 intel_dp_set_clock(encoder, pipe_config);
2266 intel_psr_compute_config(intel_dp, pipe_config);
2271 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2272 int link_rate, u8 lane_count,
2275 intel_dp->link_trained = false;
2276 intel_dp->link_rate = link_rate;
2277 intel_dp->lane_count = lane_count;
2278 intel_dp->link_mst = link_mst;
2281 static void intel_dp_prepare(struct intel_encoder *encoder,
2282 const struct intel_crtc_state *pipe_config)
2284 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2285 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2286 enum port port = encoder->port;
2287 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2288 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2290 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2291 pipe_config->lane_count,
2292 intel_crtc_has_type(pipe_config,
2293 INTEL_OUTPUT_DP_MST));
	 * There are four kinds of DP registers: IBX PCH, SNB CPU, IVB CPU
	 * and CPT PCH.
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register.
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable().
2312 /* Preserve the BIOS-computed detected bit. This is
2313 * supposed to be read-only.
2315 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2317 /* Handle DP bits in common between all three register formats */
2318 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2319 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2321 /* Split out the IBX/CPU vs CPT settings */
2323 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2324 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2325 intel_dp->DP |= DP_SYNC_HS_HIGH;
2326 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2327 intel_dp->DP |= DP_SYNC_VS_HIGH;
2328 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2330 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2331 intel_dp->DP |= DP_ENHANCED_FRAMING;
2333 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2334 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2337 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2339 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2340 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2341 trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
2344 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2346 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2347 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2349 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2350 intel_dp->DP |= DP_SYNC_HS_HIGH;
2351 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2352 intel_dp->DP |= DP_SYNC_VS_HIGH;
2353 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2355 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2356 intel_dp->DP |= DP_ENHANCED_FRAMING;
2358 if (IS_CHERRYVIEW(dev_priv))
2359 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2365 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2366 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2368 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2369 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2371 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2372 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
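/*
 * Example reading of the mask/value pairs above (illustrative): waiting for
 * IDLE_CYCLE_MASK/IDLE_CYCLE_VALUE means "panel power off (PP_ON clear),
 * sequencer idle (PP_SEQUENCE_NONE), power-cycle delay not active, and the
 * sequencer state machine in its off/idle state".
 */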
2374 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2376 static void wait_panel_status(struct intel_dp *intel_dp,
2380 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2381 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2383 lockdep_assert_held(&dev_priv->pps_mutex);
2385 intel_pps_verify_state(intel_dp);
2387 pp_stat_reg = _pp_stat_reg(intel_dp);
2388 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2390 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2392 I915_READ(pp_stat_reg),
2393 I915_READ(pp_ctrl_reg));
2395 if (intel_wait_for_register(&dev_priv->uncore,
2396 pp_stat_reg, mask, value,
2398 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2399 I915_READ(pp_stat_reg),
2400 I915_READ(pp_ctrl_reg));
2402 DRM_DEBUG_KMS("Wait complete\n");
2405 static void wait_panel_on(struct intel_dp *intel_dp)
2407 DRM_DEBUG_KMS("Wait for panel power on\n");
2408 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2411 static void wait_panel_off(struct intel_dp *intel_dp)
2413 DRM_DEBUG_KMS("Wait for panel power off time\n");
2414 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2417 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2419 ktime_t panel_power_on_time;
2420 s64 panel_power_off_duration;
2422 DRM_DEBUG_KMS("Wait for panel power cycle\n");
	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
2426 panel_power_on_time = ktime_get_boottime();
2427 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
2431 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2432 wait_remaining_ms_from_jiffies(jiffies,
2433 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2435 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
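/*
 * Worked example (illustrative): with a panel_power_cycle_delay (T11+T12)
 * of 500ms, a panel that was switched off 180ms ago still sleeps for the
 * remaining 500 - 180 = 320ms before the sequencer is used again.
 */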
2438 static void wait_backlight_on(struct intel_dp *intel_dp)
2440 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2441 intel_dp->backlight_on_delay);
2444 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2446 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2447 intel_dp->backlight_off_delay);
/* Read the current pp_control value, unlocking the register if it
 * is locked. */
2454 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2456 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2459 lockdep_assert_held(&dev_priv->pps_mutex);
2461 control = I915_READ(_pp_ctrl_reg(intel_dp));
2462 if (WARN_ON(!HAS_DDI(dev_priv) &&
2463 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2464 control &= ~PANEL_UNLOCK_MASK;
2465 control |= PANEL_UNLOCK_REGS;
2471 * Must be paired with edp_panel_vdd_off().
2472 * Must hold pps_mutex around the whole on/off sequence.
2473 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2475 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2477 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2478 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2480 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2481 bool need_to_disable = !intel_dp->want_panel_vdd;
2483 lockdep_assert_held(&dev_priv->pps_mutex);
2485 if (!intel_dp_is_edp(intel_dp))
2488 cancel_delayed_work(&intel_dp->panel_vdd_work);
2489 intel_dp->want_panel_vdd = true;
2491 if (edp_have_panel_vdd(intel_dp))
2492 return need_to_disable;
2494 intel_display_power_get(dev_priv,
2495 intel_aux_power_domain(intel_dig_port));
2497 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2498 port_name(intel_dig_port->base.port));
2500 if (!edp_have_panel_power(intel_dp))
2501 wait_panel_power_cycle(intel_dp);
2503 pp = ironlake_get_pp_control(intel_dp);
2504 pp |= EDP_FORCE_VDD;
2506 pp_stat_reg = _pp_stat_reg(intel_dp);
2507 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2509 I915_WRITE(pp_ctrl_reg, pp);
2510 POSTING_READ(pp_ctrl_reg);
2511 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2512 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2514 * If the panel wasn't on, delay before accessing aux channel
2516 if (!edp_have_panel_power(intel_dp)) {
2517 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2518 port_name(intel_dig_port->base.port));
2519 msleep(intel_dp->panel_power_up_delay);
2522 return need_to_disable;
2526 * Must be paired with intel_edp_panel_vdd_off() or
2527 * intel_edp_panel_off().
2528 * Nested calls to these functions are not allowed since
2529 * we drop the lock. Caller must use some higher level
2530 * locking to prevent nested calls from other threads.
2532 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2534 intel_wakeref_t wakeref;
2537 if (!intel_dp_is_edp(intel_dp))
2541 with_pps_lock(intel_dp, wakeref)
2542 vdd = edp_panel_vdd_on(intel_dp);
2543 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2544 port_name(dp_to_dig_port(intel_dp)->base.port));
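/*
 * Illustrative usage sketch (not part of the original source): the pairing
 * rule documented above is what e.g. the disable path follows - take VDD,
 * do the work that needs the panel powered, then drop everything through a
 * higher-level helper.
 */
static void example_panel_vdd_pairing(struct intel_dp *intel_dp)
{
	intel_edp_panel_vdd_on(intel_dp);	/* takes pps_lock internally */
	/* ... AUX transfers / panel register accesses go here ... */
	intel_edp_panel_off(intel_dp);		/* drops panel power and VDD */
}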
2547 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2549 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2550 struct intel_digital_port *intel_dig_port =
2551 dp_to_dig_port(intel_dp);
2553 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2555 lockdep_assert_held(&dev_priv->pps_mutex);
2557 WARN_ON(intel_dp->want_panel_vdd);
2559 if (!edp_have_panel_vdd(intel_dp))
2562 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2563 port_name(intel_dig_port->base.port));
2565 pp = ironlake_get_pp_control(intel_dp);
2566 pp &= ~EDP_FORCE_VDD;
2568 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2569 pp_stat_reg = _pp_stat_reg(intel_dp);
2571 I915_WRITE(pp_ctrl_reg, pp);
2572 POSTING_READ(pp_ctrl_reg);
2574 /* Make sure sequencer is idle before allowing subsequent activity */
2575 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2576 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2578 if ((pp & PANEL_POWER_ON) == 0)
2579 intel_dp->panel_power_off_time = ktime_get_boottime();
2581 intel_display_power_put_unchecked(dev_priv,
2582 intel_aux_power_domain(intel_dig_port));
2585 static void edp_panel_vdd_work(struct work_struct *__work)
2587 struct intel_dp *intel_dp =
2588 container_of(to_delayed_work(__work),
2589 struct intel_dp, panel_vdd_work);
2590 intel_wakeref_t wakeref;
2592 with_pps_lock(intel_dp, wakeref) {
2593 if (!intel_dp->want_panel_vdd)
2594 edp_panel_vdd_off_sync(intel_dp);
2598 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2600 unsigned long delay;
2603 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
2607 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2608 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2612 * Must be paired with edp_panel_vdd_on().
2613 * Must hold pps_mutex around the whole on/off sequence.
2614 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2616 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2618 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2620 lockdep_assert_held(&dev_priv->pps_mutex);
2622 if (!intel_dp_is_edp(intel_dp))
2625 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2626 port_name(dp_to_dig_port(intel_dp)->base.port));
2628 intel_dp->want_panel_vdd = false;
	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
2636 static void edp_panel_on(struct intel_dp *intel_dp)
2638 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2640 i915_reg_t pp_ctrl_reg;
2642 lockdep_assert_held(&dev_priv->pps_mutex);
2644 if (!intel_dp_is_edp(intel_dp))
2647 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2648 port_name(dp_to_dig_port(intel_dp)->base.port));
2650 if (WARN(edp_have_panel_power(intel_dp),
2651 "eDP port %c panel power already on\n",
2652 port_name(dp_to_dig_port(intel_dp)->base.port)))
2655 wait_panel_power_cycle(intel_dp);
2657 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2658 pp = ironlake_get_pp_control(intel_dp);
2659 if (IS_GEN(dev_priv, 5)) {
2660 /* ILK workaround: disable reset around power sequence */
2661 pp &= ~PANEL_POWER_RESET;
2662 I915_WRITE(pp_ctrl_reg, pp);
2663 POSTING_READ(pp_ctrl_reg);
2666 pp |= PANEL_POWER_ON;
2667 if (!IS_GEN(dev_priv, 5))
2668 pp |= PANEL_POWER_RESET;
2670 I915_WRITE(pp_ctrl_reg, pp);
2671 POSTING_READ(pp_ctrl_reg);
2673 wait_panel_on(intel_dp);
2674 intel_dp->last_power_on = jiffies;
2676 if (IS_GEN(dev_priv, 5)) {
2677 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2678 I915_WRITE(pp_ctrl_reg, pp);
2679 POSTING_READ(pp_ctrl_reg);
2683 void intel_edp_panel_on(struct intel_dp *intel_dp)
2685 intel_wakeref_t wakeref;
2687 if (!intel_dp_is_edp(intel_dp))
2690 with_pps_lock(intel_dp, wakeref)
2691 edp_panel_on(intel_dp);
2695 static void edp_panel_off(struct intel_dp *intel_dp)
2697 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2698 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2700 i915_reg_t pp_ctrl_reg;
2702 lockdep_assert_held(&dev_priv->pps_mutex);
2704 if (!intel_dp_is_edp(intel_dp))
2707 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2708 port_name(dig_port->base.port));
2710 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2711 port_name(dig_port->base.port));
2713 pp = ironlake_get_pp_control(intel_dp);
2714 /* We need to switch off panel power _and_ force vdd, for otherwise some
2715 * panels get very unhappy and cease to work. */
2716 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2719 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2721 intel_dp->want_panel_vdd = false;
2723 I915_WRITE(pp_ctrl_reg, pp);
2724 POSTING_READ(pp_ctrl_reg);
2726 wait_panel_off(intel_dp);
2727 intel_dp->panel_power_off_time = ktime_get_boottime();
2729 /* We got a reference when we enabled the VDD. */
2730 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2733 void intel_edp_panel_off(struct intel_dp *intel_dp)
2735 intel_wakeref_t wakeref;
2737 if (!intel_dp_is_edp(intel_dp))
2740 with_pps_lock(intel_dp, wakeref)
2741 edp_panel_off(intel_dp);
2744 /* Enable backlight in the panel power control. */
2745 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2747 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2748 intel_wakeref_t wakeref;
2751 * If we enable the backlight right away following a panel power
2752 * on, we may see slight flicker as the panel syncs with the eDP
2753 * link. So delay a bit to make sure the image is solid before
2754 * allowing it to appear.
2756 wait_backlight_on(intel_dp);
2758 with_pps_lock(intel_dp, wakeref) {
2759 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2762 pp = ironlake_get_pp_control(intel_dp);
2763 pp |= EDP_BLC_ENABLE;
2765 I915_WRITE(pp_ctrl_reg, pp);
2766 POSTING_READ(pp_ctrl_reg);
2770 /* Enable backlight PWM and backlight PP control. */
2771 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2772 const struct drm_connector_state *conn_state)
2774 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2776 if (!intel_dp_is_edp(intel_dp))
2779 DRM_DEBUG_KMS("\n");
2781 intel_panel_enable_backlight(crtc_state, conn_state);
2782 _intel_edp_backlight_on(intel_dp);
2785 /* Disable backlight in the panel power control. */
2786 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2788 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2789 intel_wakeref_t wakeref;
2791 if (!intel_dp_is_edp(intel_dp))
2794 with_pps_lock(intel_dp, wakeref) {
2795 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2798 pp = ironlake_get_pp_control(intel_dp);
2799 pp &= ~EDP_BLC_ENABLE;
2801 I915_WRITE(pp_ctrl_reg, pp);
2802 POSTING_READ(pp_ctrl_reg);
2805 intel_dp->last_backlight_off = jiffies;
2806 edp_wait_backlight_off(intel_dp);
2809 /* Disable backlight PP control and backlight PWM. */
2810 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2812 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2814 if (!intel_dp_is_edp(intel_dp))
2817 DRM_DEBUG_KMS("\n");
2819 _intel_edp_backlight_off(intel_dp);
2820 intel_panel_disable_backlight(old_conn_state);
2824 * Hook for controlling the panel power control backlight through the bl_power
2825 * sysfs attribute. Take care to handle multiple calls.
2827 static void intel_edp_backlight_power(struct intel_connector *connector,
2830 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2831 intel_wakeref_t wakeref;
2835 with_pps_lock(intel_dp, wakeref)
2836 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2837 if (is_enabled == enable)
2840 DRM_DEBUG_KMS("panel power control backlight %s\n",
2841 enable ? "enable" : "disable");
	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
2849 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2851 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2852 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2853 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2855 I915_STATE_WARN(cur_state != state,
2856 "DP port %c state assertion failure (expected %s, current %s)\n",
2857 port_name(dig_port->base.port),
2858 onoff(state), onoff(cur_state));
2860 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2862 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2864 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2866 I915_STATE_WARN(cur_state != state,
2867 "eDP PLL state assertion failure (expected %s, current %s)\n",
2868 onoff(state), onoff(cur_state));
2870 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2871 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2873 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2874 const struct intel_crtc_state *pipe_config)
2876 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2877 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2879 assert_pipe_disabled(dev_priv, crtc->pipe);
2880 assert_dp_port_disabled(intel_dp);
2881 assert_edp_pll_disabled(dev_priv);
2883 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2884 pipe_config->port_clock);
2886 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2888 if (pipe_config->port_clock == 162000)
2889 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2893 I915_WRITE(DP_A, intel_dp->DP);
2898 * [DevILK] Work around required when enabling DP PLL
2899 * while a pipe is enabled going to FDI:
2900 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2901 * 2. Program DP PLL enable
2903 if (IS_GEN(dev_priv, 5))
2904 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2906 intel_dp->DP |= DP_PLL_ENABLE;
2908 I915_WRITE(DP_A, intel_dp->DP);
2913 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2914 const struct intel_crtc_state *old_crtc_state)
2916 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2917 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2919 assert_pipe_disabled(dev_priv, crtc->pipe);
2920 assert_dp_port_disabled(intel_dp);
2921 assert_edp_pll_enabled(dev_priv);
2923 DRM_DEBUG_KMS("disabling eDP PLL\n");
2925 intel_dp->DP &= ~DP_PLL_ENABLE;
2927 I915_WRITE(DP_A, intel_dp->DP);
2932 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2935 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2936 * be capable of signalling downstream hpd with a long pulse.
2937 * Whether or not that means D3 is safe to use is not clear,
2938 * but let's assume so until proven otherwise.
2940 * FIXME should really check all downstream ports...
2942 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2943 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2944 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
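/*
 * Worked example (illustrative): a DPCD 1.1 branch device (DP_DPCD_REV ==
 * 0x11) that reports DP_DWN_STRM_PORT_PRESENT and an HPD-capable first
 * downstream port satisfies the check above, so we keep it in D0 rather
 * than risk losing long-pulse hpd in D3.
 */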
2947 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2948 const struct intel_crtc_state *crtc_state,
2953 if (!crtc_state->dsc_params.compression_enable)
2956 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2957 enable ? DP_DECOMPRESSION_EN : 0);
2959 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2960 enable ? "enable" : "disable");
2963 /* If the sink supports it, try to set the power state appropriately */
2964 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2968 /* Should have a valid DPCD by this point */
2969 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2972 if (mode != DRM_MODE_DPMS_ON) {
2973 if (downstream_hpd_needs_d0(intel_dp))
2976 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2979 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
2985 for (i = 0; i < 3; i++) {
2986 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2993 if (ret == 1 && lspcon->active)
2994 lspcon_wait_pcon_mode(lspcon);
2998 DRM_DEBUG_KMS("failed to %s sink power state\n",
2999 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3002 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3003 enum port port, enum pipe *pipe)
3007 for_each_pipe(dev_priv, p) {
3008 u32 val = I915_READ(TRANS_DP_CTL(p));
3010 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3016 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
3024 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3025 i915_reg_t dp_reg, enum port port,
3031 val = I915_READ(dp_reg);
3033 ret = val & DP_PORT_EN;
3035 /* asserts want to know the pipe even if the port is disabled */
3036 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3037 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3038 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3039 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3040 else if (IS_CHERRYVIEW(dev_priv))
3041 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3043 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3048 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3051 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3052 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3053 intel_wakeref_t wakeref;
3056 wakeref = intel_display_power_get_if_enabled(dev_priv,
3057 encoder->power_domain);
3061 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3062 encoder->port, pipe);
3064 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3069 static void intel_dp_get_config(struct intel_encoder *encoder,
3070 struct intel_crtc_state *pipe_config)
3072 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3073 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3075 enum port port = encoder->port;
3076 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3078 if (encoder->type == INTEL_OUTPUT_EDP)
3079 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3081 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3083 tmp = I915_READ(intel_dp->output_reg);
3085 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3087 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3088 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3090 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3091 flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;
3095 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3096 flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
3100 if (tmp & DP_SYNC_HS_HIGH)
3101 flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;
3105 if (tmp & DP_SYNC_VS_HIGH)
3106 flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
3111 pipe_config->base.adjusted_mode.flags |= flags;
3113 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3114 pipe_config->limited_color_range = true;
3116 pipe_config->lane_count =
3117 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3119 intel_dp_get_m_n(crtc, pipe_config);
3121 if (port == PORT_A) {
3122 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3123 pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
3128 pipe_config->base.adjusted_mode.crtc_clock =
3129 intel_dotclock_calculate(pipe_config->port_clock,
3130 &pipe_config->dp_m_n);
3132 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3133 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3135 * This is a big fat ugly hack.
3137 * Some machines in UEFI boot mode provide us a VBT that has 18
3138 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3139 * unknown we fail to light up. Yet the same BIOS boots up with
3140 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3141 * max, not what it tells us to use.
3143 * Note: This will still be broken if the eDP panel is not lit
	 * up by the BIOS, and thus we can't get the mode at module
	 * load.
3147 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3148 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3149 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3153 static void intel_disable_dp(struct intel_encoder *encoder,
3154 const struct intel_crtc_state *old_crtc_state,
3155 const struct drm_connector_state *old_conn_state)
3157 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3159 intel_dp->link_trained = false;
3161 if (old_crtc_state->has_audio)
3162 intel_audio_codec_disable(encoder,
3163 old_crtc_state, old_conn_state);
3165 /* Make sure the panel is off before trying to change the mode. But also
3166 * ensure that we have vdd while we switch off the panel. */
3167 intel_edp_panel_vdd_on(intel_dp);
3168 intel_edp_backlight_off(old_conn_state);
3169 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3170 intel_edp_panel_off(intel_dp);
3173 static void g4x_disable_dp(struct intel_encoder *encoder,
3174 const struct intel_crtc_state *old_crtc_state,
3175 const struct drm_connector_state *old_conn_state)
3177 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3180 static void vlv_disable_dp(struct intel_encoder *encoder,
3181 const struct intel_crtc_state *old_crtc_state,
3182 const struct drm_connector_state *old_conn_state)
3184 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3187 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3188 const struct intel_crtc_state *old_crtc_state,
3189 const struct drm_connector_state *old_conn_state)
3191 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3192 enum port port = encoder->port;
3195 * Bspec does not list a specific disable sequence for g4x DP.
3196 * Follow the ilk+ sequence (disable pipe before the port) for
3197 * g4x DP as it does not suffer from underruns like the normal
3198 * g4x modeset sequence (disable pipe after the port).
3200 intel_dp_link_down(encoder, old_crtc_state);
3202 /* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
3207 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3208 const struct intel_crtc_state *old_crtc_state,
3209 const struct drm_connector_state *old_conn_state)
3211 intel_dp_link_down(encoder, old_crtc_state);
3214 static void chv_post_disable_dp(struct intel_encoder *encoder,
3215 const struct intel_crtc_state *old_crtc_state,
3216 const struct drm_connector_state *old_conn_state)
3218 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3220 intel_dp_link_down(encoder, old_crtc_state);
3222 vlv_dpio_get(dev_priv);
3224 /* Assert data lane reset */
3225 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3227 vlv_dpio_put(dev_priv);
3231 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3235 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3237 enum port port = intel_dig_port->base.port;
3238 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3240 if (dp_train_pat & train_pat_mask)
3241 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3242 dp_train_pat & train_pat_mask);
3244 if (HAS_DDI(dev_priv)) {
3245 u32 temp = I915_READ(DP_TP_CTL(port));
3247 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3248 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3252 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3253 switch (dp_train_pat & train_pat_mask) {
3254 case DP_TRAINING_PATTERN_DISABLE:
3255 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3258 case DP_TRAINING_PATTERN_1:
3259 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3261 case DP_TRAINING_PATTERN_2:
3262 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3264 case DP_TRAINING_PATTERN_3:
3265 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3267 case DP_TRAINING_PATTERN_4:
3268 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3271 I915_WRITE(DP_TP_CTL(port), temp);
3273 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3274 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3275 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3277 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3278 case DP_TRAINING_PATTERN_DISABLE:
3279 *DP |= DP_LINK_TRAIN_OFF_CPT;
3281 case DP_TRAINING_PATTERN_1:
3282 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3284 case DP_TRAINING_PATTERN_2:
3285 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3287 case DP_TRAINING_PATTERN_3:
3288 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3289 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3294 *DP &= ~DP_LINK_TRAIN_MASK;
3296 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3297 case DP_TRAINING_PATTERN_DISABLE:
3298 *DP |= DP_LINK_TRAIN_OFF;
3300 case DP_TRAINING_PATTERN_1:
3301 *DP |= DP_LINK_TRAIN_PAT_1;
3303 case DP_TRAINING_PATTERN_2:
3304 *DP |= DP_LINK_TRAIN_PAT_2;
3306 case DP_TRAINING_PATTERN_3:
3307 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3308 *DP |= DP_LINK_TRAIN_PAT_2;
3314 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3315 const struct intel_crtc_state *old_crtc_state)
3317 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3319 /* enable with pattern 1 (as per spec) */
3321 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3324 * Magic for VLV/CHV. We _must_ first set up the register
3325 * without actually enabling the port, and then do another
3326 * write to enable the port. Otherwise link training will
3327 * fail when the power sequencer is freshly used for this port.
3329 intel_dp->DP |= DP_PORT_EN;
3330 if (old_crtc_state->has_audio)
3331 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3333 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3334 POSTING_READ(intel_dp->output_reg);
3337 static void intel_enable_dp(struct intel_encoder *encoder,
3338 const struct intel_crtc_state *pipe_config,
3339 const struct drm_connector_state *conn_state)
3341 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3342 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3343 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3344 u32 dp_reg = I915_READ(intel_dp->output_reg);
3345 enum pipe pipe = crtc->pipe;
3346 intel_wakeref_t wakeref;
3348 if (WARN_ON(dp_reg & DP_PORT_EN))
3351 with_pps_lock(intel_dp, wakeref) {
3352 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3353 vlv_init_panel_power_sequencer(encoder, pipe_config);
3355 intel_dp_enable_port(intel_dp, pipe_config);
3357 edp_panel_vdd_on(intel_dp);
3358 edp_panel_on(intel_dp);
3359 edp_panel_vdd_off(intel_dp, true);
3362 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3363 unsigned int lane_mask = 0x0;
3365 if (IS_CHERRYVIEW(dev_priv))
3366 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3368 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3372 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3373 intel_dp_start_link_train(intel_dp);
3374 intel_dp_stop_link_train(intel_dp);
3376 if (pipe_config->has_audio) {
3377 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3379 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3383 static void g4x_enable_dp(struct intel_encoder *encoder,
3384 const struct intel_crtc_state *pipe_config,
3385 const struct drm_connector_state *conn_state)
3387 intel_enable_dp(encoder, pipe_config, conn_state);
3388 intel_edp_backlight_on(pipe_config, conn_state);
3391 static void vlv_enable_dp(struct intel_encoder *encoder,
3392 const struct intel_crtc_state *pipe_config,
3393 const struct drm_connector_state *conn_state)
3395 intel_edp_backlight_on(pipe_config, conn_state);
3398 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3399 const struct intel_crtc_state *pipe_config,
3400 const struct drm_connector_state *conn_state)
3402 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3403 enum port port = encoder->port;
3405 intel_dp_prepare(encoder, pipe_config);
3407 /* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
3412 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3414 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3415 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3416 enum pipe pipe = intel_dp->pps_pipe;
3417 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3419 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3421 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3424 edp_panel_vdd_off_sync(intel_dp);
3427 * VLV seems to get confused when multiple power sequencers
3428 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
3435 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3436 pipe_name(pipe), port_name(intel_dig_port->base.port));
3437 I915_WRITE(pp_on_reg, 0);
3438 POSTING_READ(pp_on_reg);
3440 intel_dp->pps_pipe = INVALID_PIPE;
3443 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3446 struct intel_encoder *encoder;
3448 lockdep_assert_held(&dev_priv->pps_mutex);
3450 for_each_intel_dp(&dev_priv->drm, encoder) {
3451 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3452 enum port port = encoder->port;
3454 WARN(intel_dp->active_pipe == pipe,
3455 "stealing pipe %c power sequencer from active (e)DP port %c\n",
3456 pipe_name(pipe), port_name(port));
3458 if (intel_dp->pps_pipe != pipe)
3461 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3462 pipe_name(pipe), port_name(port));
3464 /* make sure vdd is off before we steal it */
3465 vlv_detach_power_sequencer(intel_dp);
3469 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3470 const struct intel_crtc_state *crtc_state)
3472 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3473 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3474 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3476 lockdep_assert_held(&dev_priv->pps_mutex);
3478 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3480 if (intel_dp->pps_pipe != INVALID_PIPE &&
3481 intel_dp->pps_pipe != crtc->pipe) {
3483 * If another power sequencer was being used on this
3484 * port previously make sure to turn off vdd there while
3485 * we still have control of it.
3487 vlv_detach_power_sequencer(intel_dp);
3491 * We may be stealing the power
3492 * sequencer from another port.
3494 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3496 intel_dp->active_pipe = crtc->pipe;
3498 if (!intel_dp_is_edp(intel_dp))
3501 /* now it's all ours */
3502 intel_dp->pps_pipe = crtc->pipe;
3504 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3505 pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3507 /* init power sequencer on this pipe and port */
3508 intel_dp_init_panel_power_sequencer(intel_dp);
3509 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3512 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3513 const struct intel_crtc_state *pipe_config,
3514 const struct drm_connector_state *conn_state)
3516 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3518 intel_enable_dp(encoder, pipe_config, conn_state);
3521 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3522 const struct intel_crtc_state *pipe_config,
3523 const struct drm_connector_state *conn_state)
3525 intel_dp_prepare(encoder, pipe_config);
3527 vlv_phy_pre_pll_enable(encoder, pipe_config);
3530 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3531 const struct intel_crtc_state *pipe_config,
3532 const struct drm_connector_state *conn_state)
3534 chv_phy_pre_encoder_enable(encoder, pipe_config);
3536 intel_enable_dp(encoder, pipe_config, conn_state);
3538 /* Second common lane will stay alive on its own now */
3539 chv_phy_release_cl2_override(encoder);
3542 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3543 const struct intel_crtc_state *pipe_config,
3544 const struct drm_connector_state *conn_state)
3546 intel_dp_prepare(encoder, pipe_config);
3548 chv_phy_pre_pll_enable(encoder, pipe_config);
3551 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3552 const struct intel_crtc_state *old_crtc_state,
3553 const struct drm_connector_state *old_conn_state)
3555 chv_phy_post_pll_disable(encoder, old_crtc_state);
3559 * Fetch AUX CH registers 0x202 - 0x207 which contain
3560 * link status information
3563 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3565 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3566 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
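/*
 * Illustrative sketch (not part of the original source): callers typically
 * feed the six raw status bytes to the drm helpers to decide whether the
 * link is still trained, along these lines:
 */
static bool example_dp_link_ok(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/* checks CR_DONE/EQ_DONE/SYMBOL_LOCKED per lane + interlane align */
	return drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}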
3569 /* These are source-specific values. */
3571 intel_dp_voltage_max(struct intel_dp *intel_dp)
3573 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3574 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3575 enum port port = encoder->port;
3577 if (HAS_DDI(dev_priv))
3578 return intel_ddi_dp_voltage_max(encoder);
3579 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3580 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3581 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3582 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3583 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3584 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3586 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3590 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3592 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3593 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3594 enum port port = encoder->port;
3596 if (HAS_DDI(dev_priv)) {
3597 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3598 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3599 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3601 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3603 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3604 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3605 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3608 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3610 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3611 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3613 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3616 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3618 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3621 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3623 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3624 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3625 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3627 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3628 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3630 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3635 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3637 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3638 unsigned long demph_reg_value, preemph_reg_value,
3639 uniqtranscale_reg_value;
3640 u8 train_set = intel_dp->train_set[0];
3642 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3643 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3644 preemph_reg_value = 0x0004000;
3645 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3647 demph_reg_value = 0x2B405555;
3648 uniqtranscale_reg_value = 0x552AB83A;
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3651 demph_reg_value = 0x2B404040;
3652 uniqtranscale_reg_value = 0x5548B83A;
3654 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3655 demph_reg_value = 0x2B245555;
3656 uniqtranscale_reg_value = 0x5560B83A;
3658 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3659 demph_reg_value = 0x2B405555;
3660 uniqtranscale_reg_value = 0x5598DA3A;
3666 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3667 preemph_reg_value = 0x0002000;
3668 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3670 demph_reg_value = 0x2B404040;
3671 uniqtranscale_reg_value = 0x5552B83A;
3673 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3674 demph_reg_value = 0x2B404848;
3675 uniqtranscale_reg_value = 0x5580B83A;
3677 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3678 demph_reg_value = 0x2B404040;
3679 uniqtranscale_reg_value = 0x55ADDA3A;
3685 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3686 preemph_reg_value = 0x0000000;
3687 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3688 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3689 demph_reg_value = 0x2B305555;
3690 uniqtranscale_reg_value = 0x5570B83A;
3692 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3693 demph_reg_value = 0x2B2B4040;
3694 uniqtranscale_reg_value = 0x55ADDA3A;
3700 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3701 preemph_reg_value = 0x0006000;
3702 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3703 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3704 demph_reg_value = 0x1B405555;
3705 uniqtranscale_reg_value = 0x55ADDA3A;
3715 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3716 uniqtranscale_reg_value, 0);
3721 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3723 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3724 u32 deemph_reg_value, margin_reg_value;
3725 bool uniq_trans_scale = false;
3726 u8 train_set = intel_dp->train_set[0];
3728 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3729 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3730 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3731 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3732 deemph_reg_value = 128;
3733 margin_reg_value = 52;
3735 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3736 deemph_reg_value = 128;
3737 margin_reg_value = 77;
3739 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3740 deemph_reg_value = 128;
3741 margin_reg_value = 102;
3743 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3744 deemph_reg_value = 128;
3745 margin_reg_value = 154;
3746 uniq_trans_scale = true;
3752 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3753 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3754 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3755 deemph_reg_value = 85;
3756 margin_reg_value = 78;
3758 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3759 deemph_reg_value = 85;
3760 margin_reg_value = 116;
3762 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3763 deemph_reg_value = 85;
3764 margin_reg_value = 154;
3770 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3771 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3772 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3773 deemph_reg_value = 64;
3774 margin_reg_value = 104;
3776 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3777 deemph_reg_value = 64;
3778 margin_reg_value = 154;
3784 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3785 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3786 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3787 deemph_reg_value = 43;
3788 margin_reg_value = 154;
3798 chv_set_phy_signal_level(encoder, deemph_reg_value,
3799 margin_reg_value, uniq_trans_scale);
3805 g4x_signal_levels(u8 train_set)
3807 u32 signal_levels = 0;
3809 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3810 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3812 signal_levels |= DP_VOLTAGE_0_4;
3814 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3815 signal_levels |= DP_VOLTAGE_0_6;
3817 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3818 signal_levels |= DP_VOLTAGE_0_8;
3820 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3821 signal_levels |= DP_VOLTAGE_1_2;
3824 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3825 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3827 signal_levels |= DP_PRE_EMPHASIS_0;
3829 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3830 signal_levels |= DP_PRE_EMPHASIS_3_5;
3832 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3833 signal_levels |= DP_PRE_EMPHASIS_6;
3835 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3836 signal_levels |= DP_PRE_EMPHASIS_9_5;
3839 return signal_levels;
3842 /* SNB CPU eDP voltage swing and pre-emphasis control */
3844 snb_cpu_edp_signal_levels(u8 train_set)
3846 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3847 DP_TRAIN_PRE_EMPHASIS_MASK);
3848 switch (signal_levels) {
3849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3851 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3853 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3855 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3856 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3859 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3862 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3864 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3865 "0x%x\n", signal_levels);
3866 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3870 /* IVB CPU eDP voltage swing and pre-emphasis control */
3872 ivb_cpu_edp_signal_levels(u8 train_set)
3874 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3875 DP_TRAIN_PRE_EMPHASIS_MASK);
3876 switch (signal_levels) {
3877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3878 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3879 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3880 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3882 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3884 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3885 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3886 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3887 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3890 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3891 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3892 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3895 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3896 "0x%x\n", signal_levels);
3897 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3902 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3904 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3905 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3906 enum port port = intel_dig_port->base.port;
3907 u32 signal_levels, mask = 0;
3908 u8 train_set = intel_dp->train_set[0];
3910 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3911 signal_levels = bxt_signal_levels(intel_dp);
3912 } else if (HAS_DDI(dev_priv)) {
3913 signal_levels = ddi_signal_levels(intel_dp);
3914 mask = DDI_BUF_EMP_MASK;
3915 } else if (IS_CHERRYVIEW(dev_priv)) {
3916 signal_levels = chv_signal_levels(intel_dp);
3917 } else if (IS_VALLEYVIEW(dev_priv)) {
3918 signal_levels = vlv_signal_levels(intel_dp);
3919 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3920 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3921 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3922 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3923 signal_levels = snb_cpu_edp_signal_levels(train_set);
3924 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3926 signal_levels = g4x_signal_levels(train_set);
3927 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3931 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3933 DRM_DEBUG_KMS("Using vswing level %d\n",
3934 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3935 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3936 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3937 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3939 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3941 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3942 POSTING_READ(intel_dp->output_reg);
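/*
 * Worked example (illustrative): a train_set byte of 0x11 decodes as
 * voltage swing level 1 (0x11 & DP_TRAIN_VOLTAGE_SWING_MASK) and
 * pre-emphasis level 2 ((0x11 & DP_TRAIN_PRE_EMPHASIS_MASK) >>
 * DP_TRAIN_PRE_EMPHASIS_SHIFT), matching the two debug prints above.
 */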
3946 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3949 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3950 struct drm_i915_private *dev_priv =
3951 to_i915(intel_dig_port->base.base.dev);
3953 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3955 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3956 POSTING_READ(intel_dp->output_reg);
3959 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3961 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3962 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3963 enum port port = intel_dig_port->base.port;
3966 if (!HAS_DDI(dev_priv))
3969 val = I915_READ(DP_TP_CTL(port));
3970 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3971 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3972 I915_WRITE(DP_TP_CTL(port), val);
3975 * On PORT_A we can have only eDP in SST mode. There the only reason
3976 * we need to set idle transmission mode is to work around a HW issue
3977 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is a requirement to wait for a minimum number of
3979 * idle patterns to be sent.
3984 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3985 DP_TP_STATUS_IDLE_DONE,
3986 DP_TP_STATUS_IDLE_DONE,
3988 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3992 intel_dp_link_down(struct intel_encoder *encoder,
3993 const struct intel_crtc_state *old_crtc_state)
3995 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3996 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3997 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3998 enum port port = encoder->port;
3999 u32 DP = intel_dp->DP;
4001 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4004 DRM_DEBUG_KMS("\n");
4006 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4007 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4008 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4009 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
4012 DP |= DP_LINK_TRAIN_PAT_IDLE;
4014 I915_WRITE(intel_dp->output_reg, DP);
4015 POSTING_READ(intel_dp->output_reg);
4017 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4018 I915_WRITE(intel_dp->output_reg, DP);
4019 POSTING_READ(intel_dp->output_reg);
4022 * HW workaround for IBX, we need to move the port
4023 * to transcoder A after disabling it to allow the
4024 * matching HDMI port to be enabled on transcoder A.
4026 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4028 * We get CPU/PCH FIFO underruns on the other pipe when
4029 * doing the workaround. Sweep them under the rug.
4031 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4032 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4034 /* always enable with pattern 1 (as per spec) */
4035 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4036 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4037 DP_LINK_TRAIN_PAT_1;
4038 I915_WRITE(intel_dp->output_reg, DP);
4039 POSTING_READ(intel_dp->output_reg);
4042 I915_WRITE(intel_dp->output_reg, DP);
4043 POSTING_READ(intel_dp->output_reg);
4045 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4046 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4047 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4050 msleep(intel_dp->panel_power_down_delay);
4054 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4055 intel_wakeref_t wakeref;
4057 with_pps_lock(intel_dp, wakeref)
4058 intel_dp->active_pipe = INVALID_PIPE;
4063 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
	 * Prior to DP 1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4071 * the true capability of the panel. The only way to check is to
4072 * then compare 0000h and 2200h.
4074 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4075 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4078 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4079 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4080 DRM_ERROR("DPCD failed read at extended capabilities\n");
4084 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4085 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;
4092 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4093 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4095 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4099 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4101 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4102 sizeof(intel_dp->dpcd)) < 0)
4103 return false; /* aux transfer failed */
4105 intel_dp_extended_receiver_capabilities(intel_dp);
4107 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4109 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4112 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4116 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4119 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4122 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4125 * Clear the cached register set to avoid using stale values
4126 * for the sinks that do not support DSC.
4128 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4130 /* Clear fec_capable to avoid using stale values */
4131 intel_dp->fec_capable = 0;
4133 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4134 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4135 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4136 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4138 sizeof(intel_dp->dsc_dpcd)) < 0)
4139 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4142 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4143 (int)sizeof(intel_dp->dsc_dpcd),
4144 intel_dp->dsc_dpcd);
4146 /* FEC is supported only on DP 1.4 */
4147 if (!intel_dp_is_edp(intel_dp) &&
4148 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4149 &intel_dp->fec_capable) < 0)
4150 DRM_ERROR("Failed to read FEC DPCD register\n");
4152 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4157 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4159 struct drm_i915_private *dev_priv =
4160 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4162 /* this function is meant to be called only once */
4163 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4165 if (!intel_dp_read_dpcd(intel_dp))
4168 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4169 drm_dp_is_branch(intel_dp->dpcd));
4171 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4172 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4173 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4176 * Read the eDP display control registers.
4178 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4179 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4180 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4181 * method). The display control registers should read zero if they're
4182 * not supported anyway.
4184 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4185 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4186 sizeof(intel_dp->edp_dpcd))
4187 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4188 intel_dp->edp_dpcd);
4191 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4192 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4194 intel_psr_init_dpcd(intel_dp);
4196 /* Read the eDP 1.4+ supported link rates. */
4197 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4198 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4201 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4202 sink_rates, sizeof(sink_rates));
4204 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4205 int val = le16_to_cpu(sink_rates[i]);
4210 /* Value read multiplied by 200kHz gives the per-lane
4211 * link rate in kHz. The source rates are, however,
4212 * stored in terms of LS_Clk kHz. The full conversion
4213 * back to symbols is
4214 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
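* e.g. an HBR2 (5.4 Gbps per lane) entry reads back as val = 27000:
* 27000 * 200 kHz is the 5.4 GHz bit rate, and (27000 * 200) / 10
* stores the 540000 kHz LS_Clk rate in sink_rates[].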
4216 intel_dp->sink_rates[i] = (val * 200) / 10;
4218 intel_dp->num_sink_rates = i;
4222 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4223 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4225 if (intel_dp->num_sink_rates)
4226 intel_dp->use_rate_select = true;
4228 intel_dp_set_sink_rates(intel_dp);
4230 intel_dp_set_common_rates(intel_dp);
4232 /* Read the eDP DSC DPCD registers */
4233 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4234 intel_dp_get_dsc_sink_cap(intel_dp);
4241 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4243 if (!intel_dp_read_dpcd(intel_dp))
4246 /* Don't clobber cached eDP rates. */
4247 if (!intel_dp_is_edp(intel_dp)) {
4248 intel_dp_set_sink_rates(intel_dp);
4249 intel_dp_set_common_rates(intel_dp);
* Some eDP panels do not set a valid value for the sink count, which
* is why we don't care about reading it here or in intel_edp_init_dpcd().
4256 if (!intel_dp_is_edp(intel_dp)) {
4260 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
* Sink count can change between short pulse HPD interrupts, hence
* a member variable in intel_dp tracks any changes between short
* pulse interrupts.
4269 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
* SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
* a dongle is present but no display. Unless we need to know
* whether a dongle is present or not, we don't need to update
* downstream port information, so an early return here saves
* time by skipping operations that are not required.
4278 if (!intel_dp->sink_count)
4282 if (!drm_dp_is_branch(intel_dp->dpcd))
4283 return true; /* native DP sink */
4285 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4286 return true; /* no per-port downstream info */
4288 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4289 intel_dp->downstream_ports,
4290 DP_MAX_DOWNSTREAM_PORTS) < 0)
4291 return false; /* downstream port status fetch failed */
4297 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4301 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4304 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4307 return mstm_cap & DP_MST_CAP;
4311 intel_dp_can_mst(struct intel_dp *intel_dp)
4313 return i915_modparams.enable_dp_mst &&
4314 intel_dp->can_mst &&
4315 intel_dp_sink_can_mst(intel_dp);
4319 intel_dp_configure_mst(struct intel_dp *intel_dp)
4321 struct intel_encoder *encoder =
4322 &dp_to_dig_port(intel_dp)->base;
4323 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4325 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4326 port_name(encoder->port), yesno(intel_dp->can_mst),
4327 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4329 if (!intel_dp->can_mst)
4332 intel_dp->is_mst = sink_can_mst &&
4333 i915_modparams.enable_dp_mst;
4335 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4340 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4342 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4343 sink_irq_vector, DP_DPRX_ESI_LEN) ==
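/*
 * Note: the ESI block read above starts at DPCD 0x2002
 * (DP_SINK_COUNT_ESI), so esi[1] holds the ESI device service IRQ
 * vector and esi[10..12] land on the 0x200c..0x200e link status
 * fields that the MST IRQ handler checks.
 */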
4347 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4348 int mode_clock, int mode_hdisplay)
4350 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4354 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4355 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4356 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4357 * for MST -> TimeSlotsPerMTP has to be calculated
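* e.g. 4 lanes of HBR3: 810000 kHz * 4 * 8 * 0.976 ~= 25.3 Gbit/s of
* payload; with a 533250 kHz 4k60 pixel clock that bounds the
* compressed output at roughly 47 bpp before the clamps below.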
4359 bits_per_pixel = (link_clock * lane_count * 8 *
4360 DP_DSC_FEC_OVERHEAD_FACTOR) /
4363 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4364 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
* check, output bpp from small joiner RAM check)
4371 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4373 /* Error out if the max bpp is less than smallest allowed valid bpp */
4374 if (bits_per_pixel < valid_dsc_bpp[0]) {
4375 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4379 /* Find the nearest match in the array of known BPPs from VESA */
4380 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4381 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4384 bits_per_pixel = valid_dsc_bpp[i];
* Compressed BPP is in U6.4 format, so multiply by 16; on Gen 11
* the fractional part is 0.
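* (e.g. a compressed bpp of 12 is returned as 12 << 4 = 192, i.e.
* 12.0 in U6.4)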
4390 return bits_per_pixel << 4;
4393 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4397 u8 min_slice_count, i;
4398 int max_slice_width;
4400 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4401 min_slice_count = DIV_ROUND_UP(mode_clock,
4402 DP_DSC_MAX_ENC_THROUGHPUT_0);
4404 min_slice_count = DIV_ROUND_UP(mode_clock,
4405 DP_DSC_MAX_ENC_THROUGHPUT_1);
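/*
 * e.g. a 533250 kHz 4k60 mode is below DP_DSC_PEAK_PIXEL_RATE, so
 * min_slice_count = DIV_ROUND_UP(533250, 340000) = 2 at this point.
 */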
4407 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4408 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4409 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4413 /* Also take into account max slice width */
4414 min_slice_count = min_t(u8, min_slice_count,
4415 DIV_ROUND_UP(mode_hdisplay,
4418 /* Find the closest match to the valid slice count values */
4419 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4420 if (valid_dsc_slicecount[i] >
4421 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4424 if (min_slice_count <= valid_dsc_slicecount[i])
4425 return valid_dsc_slicecount[i];
4428 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4433 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4434 const struct intel_crtc_state *crtc_state)
4436 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4437 struct dp_sdp vsc_sdp = {};
4439 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4440 vsc_sdp.sdp_header.HB0 = 0;
4441 vsc_sdp.sdp_header.HB1 = 0x7;
4444 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4445 * Colorimetry Format indication.
4447 vsc_sdp.sdp_header.HB2 = 0x5;
4450 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
4451 * Colorimetry Format indication (HB2 = 05h).
4453 vsc_sdp.sdp_header.HB3 = 0x13;
* Pixel Encoding: YCbCr 4:2:0 = 3h in DB16[7:4]; Colorimetry:
* ITU-R BT.601 = 0h, ITU-R BT.709 = 1h in DB16[3:0] (DP 1.4a spec,
* Table 2-120)
4459 vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
4460 /* RGB->YCBCR color conversion uses the BT.709 color space. */
4461 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4464 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4465 * the following Component Bit Depth values are defined:
4471 switch (crtc_state->pipe_bpp) {
4473 vsc_sdp.db[17] = 0x1;
4475 case 30: /* 10bpc */
4476 vsc_sdp.db[17] = 0x2;
4478 case 36: /* 12bpc */
4479 vsc_sdp.db[17] = 0x3;
4481 case 48: /* 16bpc */
4482 vsc_sdp.db[17] = 0x4;
4485 MISSING_CASE(crtc_state->pipe_bpp);
4490 * Dynamic Range (Bit 7)
4491 * 0 = VESA range, 1 = CTA range.
4492 * all YCbCr are always limited range
4494 vsc_sdp.db[17] |= 0x80;
4497 * Content Type (Bits 2:0)
4498 * 000b = Not defined.
4503 * All other values are RESERVED.
4504 * Note: See CTA-861-G for the definition and expected
* processing by a stream sink for the above content types.
4509 intel_dig_port->write_infoframe(&intel_dig_port->base,
4510 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
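/*
 * For instance, a 10 bpc YCbCr 4:2:0 BT.709 stream built above goes
 * out with header bytes 00h 07h 05h 13h and DB16/DB17 = 31h/82h
 * (0x31 = 4:2:0 + BT.709, 0x82 = CTA range + 10 bpc).
 */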
4513 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4514 const struct intel_crtc_state *crtc_state)
4516 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4519 intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4522 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4526 u8 test_lane_count, test_link_bw;
/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4531 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4535 DRM_DEBUG_KMS("Lane count read failed\n");
4538 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4540 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4543 DRM_DEBUG_KMS("Link Rate read failed\n");
4546 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4548 /* Validate the requested link rate and lane count */
4549 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4553 intel_dp->compliance.test_lane_count = test_lane_count;
4554 intel_dp->compliance.test_link_rate = test_link_rate;
4559 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4563 __be16 h_width, v_height;
4566 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4567 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4570 DRM_DEBUG_KMS("Test pattern read failed\n");
4573 if (test_pattern != DP_COLOR_RAMP)
4576 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4579 DRM_DEBUG_KMS("H Width read failed\n");
4583 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4586 DRM_DEBUG_KMS("V Height read failed\n");
4590 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4593 DRM_DEBUG_KMS("TEST MISC read failed\n");
4596 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4598 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4600 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4601 case DP_TEST_BIT_DEPTH_6:
4602 intel_dp->compliance.test_data.bpc = 6;
4604 case DP_TEST_BIT_DEPTH_8:
4605 intel_dp->compliance.test_data.bpc = 8;
4611 intel_dp->compliance.test_data.video_pattern = test_pattern;
4612 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4613 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4614 /* Set test active flag here so userspace doesn't interrupt things */
4615 intel_dp->compliance.test_active = 1;
4620 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4622 u8 test_result = DP_TEST_ACK;
4623 struct intel_connector *intel_connector = intel_dp->attached_connector;
4624 struct drm_connector *connector = &intel_connector->base;
4626 if (intel_connector->detect_edid == NULL ||
4627 connector->edid_corrupt ||
4628 intel_dp->aux.i2c_defer_count > 6) {
4629 /* Check EDID read for NACKs, DEFERs and corruption
4630 * (DP CTS 1.2 Core r1.1)
4631 * 4.2.2.4 : Failed EDID read, I2C_NAK
4632 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4633 * 4.2.2.6 : EDID corruption detected
4634 * Use failsafe mode for all cases
4636 if (intel_dp->aux.i2c_nack_count > 0 ||
4637 intel_dp->aux.i2c_defer_count > 0)
4638 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4639 intel_dp->aux.i2c_nack_count,
4640 intel_dp->aux.i2c_defer_count);
4641 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4643 struct edid *block = intel_connector->detect_edid;
4645 /* We have to write the checksum
4646 * of the last block read
4648 block += intel_connector->detect_edid->extensions;
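/*
 * block is a struct edid pointer, so e.g. with one extension this
 * advances by one full 128-byte block to the last (extension) block,
 * whose checksum is the one the test expects.
 */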
4650 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4651 block->checksum) <= 0)
4652 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4654 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4655 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4658 /* Set test active flag here so userspace doesn't interrupt things */
4659 intel_dp->compliance.test_active = 1;
4664 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4666 u8 test_result = DP_TEST_NAK;
4670 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4672 u8 response = DP_TEST_NAK;
4676 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4678 DRM_DEBUG_KMS("Could not read test request from sink\n");
4683 case DP_TEST_LINK_TRAINING:
4684 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4685 response = intel_dp_autotest_link_training(intel_dp);
4687 case DP_TEST_LINK_VIDEO_PATTERN:
4688 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4689 response = intel_dp_autotest_video_pattern(intel_dp);
4691 case DP_TEST_LINK_EDID_READ:
4692 DRM_DEBUG_KMS("EDID test requested\n");
4693 response = intel_dp_autotest_edid(intel_dp);
4695 case DP_TEST_LINK_PHY_TEST_PATTERN:
4696 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4697 response = intel_dp_autotest_phy_pattern(intel_dp);
4700 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4704 if (response & DP_TEST_ACK)
4705 intel_dp->compliance.test_type = request;
4708 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4710 DRM_DEBUG_KMS("Could not write test response to sink\n");
4714 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4718 if (intel_dp->is_mst) {
4719 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4724 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4725 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
/* check link status - esi[10..12] map to DPCD 0x200c..0x200e */
4730 if (intel_dp->active_mst_links > 0 &&
4731 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4732 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4733 intel_dp_start_link_train(intel_dp);
4734 intel_dp_stop_link_train(intel_dp);
4737 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4738 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4741 for (retry = 0; retry < 3; retry++) {
4743 wret = drm_dp_dpcd_write(&intel_dp->aux,
4744 DP_SINK_COUNT_ESI+1,
4751 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4753 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4761 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4762 intel_dp->is_mst = false;
4763 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4771 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4773 u8 link_status[DP_LINK_STATUS_SIZE];
4775 if (!intel_dp->link_trained)
* While PSR source HW is enabled it controls the main link itself,
* enabling and disabling it on its own, so trying to retrain will
* fail: the link may not be on, or training patterns could get mixed
* with frame data at the same time, either of which makes the
* retrain fail.
* Also, when exiting PSR, the HW retrains the link anyway, fixing
* any link status error.
4786 if (intel_psr_enabled(intel_dp))
4789 if (!intel_dp_get_link_status(intel_dp, link_status))
4793 * Validate the cached values of intel_dp->link_rate and
4794 * intel_dp->lane_count before attempting to retrain.
4796 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4797 intel_dp->lane_count))
4800 /* Retrain if Channel EQ or CR not ok */
4801 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4804 int intel_dp_retrain_link(struct intel_encoder *encoder,
4805 struct drm_modeset_acquire_ctx *ctx)
4807 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4808 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4809 struct intel_connector *connector = intel_dp->attached_connector;
4810 struct drm_connector_state *conn_state;
4811 struct intel_crtc_state *crtc_state;
4812 struct intel_crtc *crtc;
4815 /* FIXME handle the MST connectors as well */
4817 if (!connector || connector->base.status != connector_status_connected)
4820 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4825 conn_state = connector->base.state;
4827 crtc = to_intel_crtc(conn_state->crtc);
4831 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4835 crtc_state = to_intel_crtc_state(crtc->base.state);
4837 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4839 if (!crtc_state->base.active)
4842 if (conn_state->commit &&
4843 !try_wait_for_completion(&conn_state->commit->hw_done))
4846 if (!intel_dp_needs_link_retrain(intel_dp))
4849 /* Suppress underruns caused by re-training */
4850 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4851 if (crtc_state->has_pch_encoder)
4852 intel_set_pch_fifo_underrun_reporting(dev_priv,
4853 intel_crtc_pch_transcoder(crtc), false);
4855 intel_dp_start_link_train(intel_dp);
4856 intel_dp_stop_link_train(intel_dp);
4858 /* Keep underrun reporting disabled until things are stable */
4859 intel_wait_for_vblank(dev_priv, crtc->pipe);
4861 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4862 if (crtc_state->has_pch_encoder)
4863 intel_set_pch_fifo_underrun_reporting(dev_priv,
4864 intel_crtc_pch_transcoder(crtc), true);
* If the display is now connected, check link status; there have
* been known issues of link loss triggering long pulses.
4874 * Some sinks (eg. ASUS PB287Q) seem to perform some
4875 * weird HPD ping pong during modesets. So we can apparently
4876 * end up with HPD going low during a modeset, and then
4877 * going back up soon after. And once that happens we must
4878 * retrain the link to get a picture. That's in case no
4879 * userspace component reacted to intermittent HPD dip.
4881 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4882 struct intel_connector *connector)
4884 struct drm_modeset_acquire_ctx ctx;
4888 changed = intel_encoder_hotplug(encoder, connector);
4890 drm_modeset_acquire_init(&ctx, 0);
4893 ret = intel_dp_retrain_link(encoder, &ctx);
4895 if (ret == -EDEADLK) {
4896 drm_modeset_backoff(&ctx);
4903 drm_modeset_drop_locks(&ctx);
4904 drm_modeset_acquire_fini(&ctx);
4905 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4910 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4914 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4917 if (drm_dp_dpcd_readb(&intel_dp->aux,
4918 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4921 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4923 if (val & DP_AUTOMATED_TEST_REQUEST)
4924 intel_dp_handle_test_request(intel_dp);
4926 if (val & DP_CP_IRQ)
4927 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4929 if (val & DP_SINK_SPECIFIC_IRQ)
4930 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
* According to DP spec
* 5.1.2:
*  1. Read DPCD
*  2. Configure link according to Receiver Capabilities
*  3. Use Link Training from 2.5.3.3 and 3.5.1.3
*  4. Check link status on receipt of hot-plug interrupt
* intel_dp_short_pulse - handles short pulse interrupts
* when full detection is not required.
* Returns %true if the short pulse was handled and full detection
* is NOT required, %false otherwise.
4947 intel_dp_short_pulse(struct intel_dp *intel_dp)
4949 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4950 u8 old_sink_count = intel_dp->sink_count;
4954 * Clearing compliance test variables to allow capturing
4955 * of values for next automated test request.
4957 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
* Now read the DPCD to see if it's actually running.
* If the current sink count doesn't match the value that was
* stored earlier, or if the DPCD read failed, we need to do
* full detection.
4965 ret = intel_dp_get_dpcd(intel_dp);
4967 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4968 /* No need to proceed if we are going to do full detect */
4972 intel_dp_check_service_irq(intel_dp);
4974 /* Handle CEC interrupts, if any */
4975 drm_dp_cec_irq(&intel_dp->aux);
4977 /* defer to the hotplug work for link retraining if needed */
4978 if (intel_dp_needs_link_retrain(intel_dp))
4981 intel_psr_short_pulse(intel_dp);
4983 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4984 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4985 /* Send a Hotplug Uevent to userspace to start modeset */
4986 drm_kms_helper_hotplug_event(&dev_priv->drm);
4992 /* XXX this is probably wrong for multiple downstream ports */
4993 static enum drm_connector_status
4994 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4996 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4997 u8 *dpcd = intel_dp->dpcd;
5000 if (WARN_ON(intel_dp_is_edp(intel_dp)))
5001 return connector_status_connected;
5004 lspcon_resume(lspcon);
5006 if (!intel_dp_get_dpcd(intel_dp))
5007 return connector_status_disconnected;
5009 /* if there's no downstream port, we're done */
5010 if (!drm_dp_is_branch(dpcd))
5011 return connector_status_connected;
5013 /* If we're HPD-aware, SINK_COUNT changes dynamically */
5014 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5015 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5017 return intel_dp->sink_count ?
5018 connector_status_connected : connector_status_disconnected;
5021 if (intel_dp_can_mst(intel_dp))
5022 return connector_status_connected;
5024 /* If no HPD, poke DDC gently */
5025 if (drm_probe_ddc(&intel_dp->aux.ddc))
5026 return connector_status_connected;
5028 /* Well we tried, say unknown for unreliable port types */
5029 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5030 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5031 if (type == DP_DS_PORT_TYPE_VGA ||
5032 type == DP_DS_PORT_TYPE_NON_EDID)
5033 return connector_status_unknown;
5035 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5036 DP_DWN_STRM_PORT_TYPE_MASK;
5037 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5038 type == DP_DWN_STRM_PORT_TYPE_OTHER)
5039 return connector_status_unknown;
5042 /* Anything else is out of spec, warn and ignore */
5043 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5044 return connector_status_disconnected;
5047 static enum drm_connector_status
5048 edp_detect(struct intel_dp *intel_dp)
5050 return connector_status_connected;
5053 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5055 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5058 switch (encoder->hpd_pin) {
5060 bit = SDE_PORTB_HOTPLUG;
5063 bit = SDE_PORTC_HOTPLUG;
5066 bit = SDE_PORTD_HOTPLUG;
5069 MISSING_CASE(encoder->hpd_pin);
5073 return I915_READ(SDEISR) & bit;
5076 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5078 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5081 switch (encoder->hpd_pin) {
5083 bit = SDE_PORTB_HOTPLUG_CPT;
5086 bit = SDE_PORTC_HOTPLUG_CPT;
5089 bit = SDE_PORTD_HOTPLUG_CPT;
5092 MISSING_CASE(encoder->hpd_pin);
5096 return I915_READ(SDEISR) & bit;
5099 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5101 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5104 switch (encoder->hpd_pin) {
5106 bit = SDE_PORTA_HOTPLUG_SPT;
5109 bit = SDE_PORTE_HOTPLUG_SPT;
5112 return cpt_digital_port_connected(encoder);
5115 return I915_READ(SDEISR) & bit;
5118 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5120 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5123 switch (encoder->hpd_pin) {
5125 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5128 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5131 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5134 MISSING_CASE(encoder->hpd_pin);
5138 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5141 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5143 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5146 switch (encoder->hpd_pin) {
5148 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5151 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5154 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5157 MISSING_CASE(encoder->hpd_pin);
5161 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5164 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5166 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5168 if (encoder->hpd_pin == HPD_PORT_A)
5169 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5171 return ibx_digital_port_connected(encoder);
5174 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5176 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5178 if (encoder->hpd_pin == HPD_PORT_A)
5179 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5181 return cpt_digital_port_connected(encoder);
5184 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5186 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5188 if (encoder->hpd_pin == HPD_PORT_A)
5189 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5191 return cpt_digital_port_connected(encoder);
5194 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5196 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5198 if (encoder->hpd_pin == HPD_PORT_A)
5199 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5201 return cpt_digital_port_connected(encoder);
5204 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5206 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5209 switch (encoder->hpd_pin) {
5211 bit = BXT_DE_PORT_HP_DDIA;
5214 bit = BXT_DE_PORT_HP_DDIB;
5217 bit = BXT_DE_PORT_HP_DDIC;
5220 MISSING_CASE(encoder->hpd_pin);
5224 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5227 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5228 struct intel_digital_port *intel_dig_port)
5230 enum port port = intel_dig_port->base.port;
5232 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5235 static const char *tc_type_name(enum tc_port_type type)
5237 static const char * const names[] = {
5238 [TC_PORT_UNKNOWN] = "unknown",
5239 [TC_PORT_LEGACY] = "legacy",
5240 [TC_PORT_TYPEC] = "typec",
5241 [TC_PORT_TBT] = "tbt",
5244 if (WARN_ON(type >= ARRAY_SIZE(names)))
5245 type = TC_PORT_UNKNOWN;
5250 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5251 struct intel_digital_port *intel_dig_port,
5252 bool is_legacy, bool is_typec, bool is_tbt)
5254 enum port port = intel_dig_port->base.port;
5255 enum tc_port_type old_type = intel_dig_port->tc_type;
5257 WARN_ON(is_legacy + is_typec + is_tbt != 1);
5260 intel_dig_port->tc_type = TC_PORT_LEGACY;
5262 intel_dig_port->tc_type = TC_PORT_TYPEC;
5264 intel_dig_port->tc_type = TC_PORT_TBT;
5268 /* Types are not supposed to be changed at runtime. */
5269 WARN_ON(old_type != TC_PORT_UNKNOWN &&
5270 old_type != intel_dig_port->tc_type);
5272 if (old_type != intel_dig_port->tc_type)
5273 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5274 tc_type_name(intel_dig_port->tc_type));
5278 * This function implements the first part of the Connect Flow described by our
5279 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5280 * lanes, EDID, etc) is done as needed in the typical places.
5282 * Unlike the other ports, type-C ports are not available to use as soon as we
5283 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5284 * display, USB, etc. As a result, handshaking through FIA is required around
5285 * connect and disconnect to cleanly transfer ownership with the controller and
5286 * set the type-C power state.
5288 * We could opt to only do the connect flow when we actually try to use the AUX
5289 * channels or do a modeset, then immediately run the disconnect flow after
5290 * usage, but there are some implications on this for a dynamic environment:
5291 * things may go away or change behind our backs. So for now our driver is
5292 * always trying to acquire ownership of the controller as soon as it gets an
5293 * interrupt (or polls state and sees a port is connected) and only gives it
5294 * back when it sees a disconnect. Implementation of a more fine-grained model
5295 * will require a lot of coordination with user space and thorough testing for
5296 * the extra possible cases.
5298 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5299 struct intel_digital_port *dig_port)
5301 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5304 if (dig_port->tc_type != TC_PORT_LEGACY &&
5305 dig_port->tc_type != TC_PORT_TYPEC)
5308 val = I915_READ(PORT_TX_DFLEXDPPMS);
5309 if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5310 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5311 WARN_ON(dig_port->tc_legacy_port);
5316 * This function may be called many times in a row without an HPD event
5317 * in between, so try to avoid the write when we can.
5319 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5320 if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5321 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5322 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5326 * Now we have to re-check the live state, in case the port recently
5327 * became disconnected. Not necessary for legacy mode.
5329 if (dig_port->tc_type == TC_PORT_TYPEC &&
5330 !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5331 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5332 icl_tc_phy_disconnect(dev_priv, dig_port);
5340 * See the comment at the connect function. This implements the Disconnect
5343 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5344 struct intel_digital_port *dig_port)
5346 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5348 if (dig_port->tc_type == TC_PORT_UNKNOWN)
* The TBT disconnection flow just reads the live status, which was
* already done in the caller.
5355 if (dig_port->tc_type == TC_PORT_TYPEC ||
5356 dig_port->tc_type == TC_PORT_LEGACY) {
5359 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5360 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5361 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5364 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5365 port_name(dig_port->base.port),
5366 tc_type_name(dig_port->tc_type));
5368 dig_port->tc_type = TC_PORT_UNKNOWN;
5372 * The type-C ports are different because even when they are connected, they may
5373 * not be available/usable by the graphics driver: see the comment on
5374 * icl_tc_phy_connect(). So in our driver instead of adding the additional
* concept of "usable" and making everything check for "connected and
* usable" we define a port as "connected" when it is not only
* connected, but also usable by the rest of the driver. That maintains
* the old assumption that connected ports are usable, and avoids
* exposing to the users objects they can't really use.
5381 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5382 struct intel_digital_port *intel_dig_port)
5384 enum port port = intel_dig_port->base.port;
5385 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5386 bool is_legacy, is_typec, is_tbt;
5390 * Complain if we got a legacy port HPD, but VBT didn't mark the port as
5391 * legacy. Treat the port as legacy from now on.
5393 if (!intel_dig_port->tc_legacy_port &&
5394 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
5395 DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
5397 intel_dig_port->tc_legacy_port = true;
5399 is_legacy = intel_dig_port->tc_legacy_port;
5402 * The spec says we shouldn't be using the ISR bits for detecting
5403 * between TC and TBT. We should use DFLEXDPSP.
5405 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5406 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5407 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5409 if (!is_legacy && !is_typec && !is_tbt) {
5410 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5415 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5418 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5424 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5426 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5427 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5429 if (intel_port_is_combophy(dev_priv, encoder->port))
5430 return icl_combo_port_connected(dev_priv, dig_port);
5431 else if (intel_port_is_tc(dev_priv, encoder->port))
5432 return icl_tc_port_connected(dev_priv, dig_port);
5434 MISSING_CASE(encoder->hpd_pin);
5440 * intel_digital_port_connected - is the specified port connected?
5441 * @encoder: intel_encoder
5443 * In cases where there's a connector physically connected but it can't be used
5444 * by our hardware we also return false, since the rest of the driver should
5445 * pretty much treat the port as disconnected. This is relevant for type-C
5446 * (starting on ICL) where there's ownership involved.
5448 * Return %true if port is connected, %false otherwise.
5450 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5452 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5454 if (HAS_GMCH(dev_priv)) {
5455 if (IS_GM45(dev_priv))
5456 return gm45_digital_port_connected(encoder);
5458 return g4x_digital_port_connected(encoder);
5461 if (INTEL_GEN(dev_priv) >= 11)
5462 return icl_digital_port_connected(encoder);
5463 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5464 return spt_digital_port_connected(encoder);
5465 else if (IS_GEN9_LP(dev_priv))
5466 return bxt_digital_port_connected(encoder);
5467 else if (IS_GEN(dev_priv, 8))
5468 return bdw_digital_port_connected(encoder);
5469 else if (IS_GEN(dev_priv, 7))
5470 return ivb_digital_port_connected(encoder);
5471 else if (IS_GEN(dev_priv, 6))
5472 return snb_digital_port_connected(encoder);
5473 else if (IS_GEN(dev_priv, 5))
5474 return ilk_digital_port_connected(encoder);
5476 MISSING_CASE(INTEL_GEN(dev_priv));
5480 bool intel_digital_port_connected(struct intel_encoder *encoder)
5482 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5483 bool is_connected = false;
5484 intel_wakeref_t wakeref;
5486 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5487 is_connected = __intel_digital_port_connected(encoder);
5489 return is_connected;
5492 static struct edid *
5493 intel_dp_get_edid(struct intel_dp *intel_dp)
5495 struct intel_connector *intel_connector = intel_dp->attached_connector;
5497 /* use cached edid if we have one */
5498 if (intel_connector->edid) {
5500 if (IS_ERR(intel_connector->edid))
5503 return drm_edid_duplicate(intel_connector->edid);
5505 return drm_get_edid(&intel_connector->base,
5506 &intel_dp->aux.ddc);
5510 intel_dp_set_edid(struct intel_dp *intel_dp)
5512 struct intel_connector *intel_connector = intel_dp->attached_connector;
5515 intel_dp_unset_edid(intel_dp);
5516 edid = intel_dp_get_edid(intel_dp);
5517 intel_connector->detect_edid = edid;
5519 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5520 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5524 intel_dp_unset_edid(struct intel_dp *intel_dp)
5526 struct intel_connector *intel_connector = intel_dp->attached_connector;
5528 drm_dp_cec_unset_edid(&intel_dp->aux);
5529 kfree(intel_connector->detect_edid);
5530 intel_connector->detect_edid = NULL;
5532 intel_dp->has_audio = false;
5536 intel_dp_detect(struct drm_connector *connector,
5537 struct drm_modeset_acquire_ctx *ctx,
5540 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5541 struct intel_dp *intel_dp = intel_attached_dp(connector);
5542 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5543 struct intel_encoder *encoder = &dig_port->base;
5544 enum drm_connector_status status;
5546 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5547 connector->base.id, connector->name);
5548 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5550 /* Can't disconnect eDP */
5551 if (intel_dp_is_edp(intel_dp))
5552 status = edp_detect(intel_dp);
5553 else if (intel_digital_port_connected(encoder))
5554 status = intel_dp_detect_dpcd(intel_dp);
5556 status = connector_status_disconnected;
5558 if (status == connector_status_disconnected) {
5559 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5560 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5562 if (intel_dp->is_mst) {
5563 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5565 intel_dp->mst_mgr.mst_state);
5566 intel_dp->is_mst = false;
5567 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5574 if (intel_dp->reset_link_params) {
5575 /* Initial max link lane count */
5576 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5578 /* Initial max link rate */
5579 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5581 intel_dp->reset_link_params = false;
5584 intel_dp_print_rates(intel_dp);
5586 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5587 if (INTEL_GEN(dev_priv) >= 11)
5588 intel_dp_get_dsc_sink_cap(intel_dp);
5590 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5591 drm_dp_is_branch(intel_dp->dpcd));
5593 intel_dp_configure_mst(intel_dp);
5595 if (intel_dp->is_mst) {
5597 * If we are in MST mode then this connector
5598 * won't appear connected or have anything
5601 status = connector_status_disconnected;
5606 * Some external monitors do not signal loss of link synchronization
5607 * with an IRQ_HPD, so force a link status check.
5609 if (!intel_dp_is_edp(intel_dp)) {
5612 ret = intel_dp_retrain_link(encoder, ctx);
* Clear the NACK and defer counts so that we capture their exact
* values while reading the EDID, as required by Compliance tests
* 4.2.2.4 and 4.2.2.5.
5622 intel_dp->aux.i2c_nack_count = 0;
5623 intel_dp->aux.i2c_defer_count = 0;
5625 intel_dp_set_edid(intel_dp);
5626 if (intel_dp_is_edp(intel_dp) ||
5627 to_intel_connector(connector)->detect_edid)
5628 status = connector_status_connected;
5630 intel_dp_check_service_irq(intel_dp);
5633 if (status != connector_status_connected && !intel_dp->is_mst)
5634 intel_dp_unset_edid(intel_dp);
5640 intel_dp_force(struct drm_connector *connector)
5642 struct intel_dp *intel_dp = intel_attached_dp(connector);
5643 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5644 struct intel_encoder *intel_encoder = &dig_port->base;
5645 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5646 enum intel_display_power_domain aux_domain =
5647 intel_aux_power_domain(dig_port);
5648 intel_wakeref_t wakeref;
5650 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5651 connector->base.id, connector->name);
5652 intel_dp_unset_edid(intel_dp);
5654 if (connector->status != connector_status_connected)
5657 wakeref = intel_display_power_get(dev_priv, aux_domain);
5659 intel_dp_set_edid(intel_dp);
5661 intel_display_power_put(dev_priv, aux_domain, wakeref);
5664 static int intel_dp_get_modes(struct drm_connector *connector)
5666 struct intel_connector *intel_connector = to_intel_connector(connector);
5669 edid = intel_connector->detect_edid;
5671 int ret = intel_connector_update_modes(connector, edid);
5676 /* if eDP has no EDID, fall back to fixed mode */
5677 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5678 intel_connector->panel.fixed_mode) {
5679 struct drm_display_mode *mode;
5681 mode = drm_mode_duplicate(connector->dev,
5682 intel_connector->panel.fixed_mode);
5684 drm_mode_probed_add(connector, mode);
5693 intel_dp_connector_register(struct drm_connector *connector)
5695 struct intel_dp *intel_dp = intel_attached_dp(connector);
5696 struct drm_device *dev = connector->dev;
5699 ret = intel_connector_register(connector);
5703 i915_debugfs_connector_add(connector);
5705 DRM_DEBUG_KMS("registering %s bus for %s\n",
5706 intel_dp->aux.name, connector->kdev->kobj.name);
5708 intel_dp->aux.dev = connector->kdev;
5709 ret = drm_dp_aux_register(&intel_dp->aux);
5711 drm_dp_cec_register_connector(&intel_dp->aux,
5712 connector->name, dev->dev);
5717 intel_dp_connector_unregister(struct drm_connector *connector)
5719 struct intel_dp *intel_dp = intel_attached_dp(connector);
5721 drm_dp_cec_unregister_connector(&intel_dp->aux);
5722 drm_dp_aux_unregister(&intel_dp->aux);
5723 intel_connector_unregister(connector);
5726 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5728 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5729 struct intel_dp *intel_dp = &intel_dig_port->dp;
5731 intel_dp_mst_encoder_cleanup(intel_dig_port);
5732 if (intel_dp_is_edp(intel_dp)) {
5733 intel_wakeref_t wakeref;
5735 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
* vdd might still be enabled due to the delayed vdd off.
5738 * Make sure vdd is actually turned off here.
5740 with_pps_lock(intel_dp, wakeref)
5741 edp_panel_vdd_off_sync(intel_dp);
5743 if (intel_dp->edp_notifier.notifier_call) {
5744 unregister_reboot_notifier(&intel_dp->edp_notifier);
5745 intel_dp->edp_notifier.notifier_call = NULL;
5749 intel_dp_aux_fini(intel_dp);
5752 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5754 intel_dp_encoder_flush_work(encoder);
5756 drm_encoder_cleanup(encoder);
5757 kfree(enc_to_dig_port(encoder));
5760 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5762 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5763 intel_wakeref_t wakeref;
5765 if (!intel_dp_is_edp(intel_dp))
* vdd might still be enabled due to the delayed vdd off.
5770 * Make sure vdd is actually turned off here.
5772 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5773 with_pps_lock(intel_dp, wakeref)
5774 edp_panel_vdd_off_sync(intel_dp);
5777 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5781 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5782 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5783 msecs_to_jiffies(timeout));
DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5790 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5793 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5794 static const struct drm_dp_aux_msg msg = {
5795 .request = DP_AUX_NATIVE_WRITE,
5796 .address = DP_AUX_HDCP_AKSV,
5797 .size = DRM_HDCP_KSV_LEN,
5799 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5803 /* Output An first, that's easy */
5804 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5805 an, DRM_HDCP_AN_LEN);
5806 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5807 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5809 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5813 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5814 * order to get it on the wire, we need to create the AUX header as if
5815 * we were writing the data, and then tickle the hardware to output the
5816 * data once the header is sent out.
5818 intel_dp_aux_header(txbuf, &msg);
5820 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5821 rxbuf, sizeof(rxbuf),
5822 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5824 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5826 } else if (ret == 0) {
5827 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5831 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5832 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5833 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5840 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5844 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5846 if (ret != DRM_HDCP_KSV_LEN) {
5847 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5848 return ret >= 0 ? -EIO : ret;
5853 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5858 * For some reason the HDMI and DP HDCP specs call this register
5859 * definition by different names. In the HDMI spec, it's called BSTATUS,
5860 * but in DP it's called BINFO.
5862 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5863 bstatus, DRM_HDCP_BSTATUS_LEN);
5864 if (ret != DRM_HDCP_BSTATUS_LEN) {
5865 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5866 return ret >= 0 ? -EIO : ret;
5872 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5877 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5880 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5881 return ret >= 0 ? -EIO : ret;
5888 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5889 bool *repeater_present)
5894 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5898 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5903 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5907 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5908 ri_prime, DRM_HDCP_RI_LEN);
5909 if (ret != DRM_HDCP_RI_LEN) {
5910 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5911 return ret >= 0 ? -EIO : ret;
5917 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5922 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5925 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5926 return ret >= 0 ? -EIO : ret;
5928 *ksv_ready = bstatus & DP_BSTATUS_READY;
5933 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5934 int num_downstream, u8 *ksv_fifo)
5939 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
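/*
 * e.g. num_downstream = 7 takes three AUX reads of 15, 15 and 5
 * bytes (3 + 3 + 1 KSVs).
 */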
5940 for (i = 0; i < num_downstream; i += 3) {
5941 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5942 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5943 DP_AUX_HDCP_KSV_FIFO,
5944 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5947 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5949 return ret >= 0 ? -EIO : ret;
5956 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5961 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5964 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5965 DP_AUX_HDCP_V_PRIME(i), part,
5966 DRM_HDCP_V_PRIME_PART_LEN);
5967 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5968 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5969 return ret >= 0 ? -EIO : ret;
5975 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5978 /* Not used for single stream DisplayPort setups */
5983 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5988 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5991 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5995 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5999 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
6005 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
6009 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
6013 struct hdcp2_dp_errata_stream_type {
6018 static struct hdcp2_dp_msg_data {
6021 bool msg_detectable;
6023 u32 timeout2; /* Added for non_paired situation */
6024 } hdcp2_msg_data[] = {
6025 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
6026 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
6027 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
6028 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
6030 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
6032 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
6033 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
6034 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
6035 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
6036 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
6037 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
6038 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
6039 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
6040 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
6041 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
6043 {HDCP_2_2_REP_SEND_RECVID_LIST,
6044 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
6045 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
6046 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
6048 {HDCP_2_2_REP_STREAM_MANAGE,
6049 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
6051 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
6052 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
6053 /* local define to shovel this through the write_2_2 interface */
6054 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
6055 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
6056 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
6061 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
6066 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6067 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
6068 HDCP_2_2_DP_RXSTATUS_LEN);
6069 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
6070 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6071 return ret >= 0 ? -EIO : ret;
6078 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
6079 u8 msg_id, bool *msg_ready)
6085 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6090 case HDCP_2_2_AKE_SEND_HPRIME:
6091 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6094 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6095 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6098 case HDCP_2_2_REP_SEND_RECVID_LIST:
6099 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6103 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6111 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
6112 struct hdcp2_dp_msg_data *hdcp2_msg_data)
6114 struct intel_dp *dp = &intel_dig_port->dp;
6115 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6116 u8 msg_id = hdcp2_msg_data->msg_id;
6118 bool msg_ready = false;
6120 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
6121 timeout = hdcp2_msg_data->timeout2;
6123 timeout = hdcp2_msg_data->timeout;
6126 * There is no way to detect the CERT, LPRIME and STREAM_READY
* availability. So wait for the timeout and then read the msg.
6129 if (!hdcp2_msg_data->msg_detectable) {
* As we want to check msg availability at the timeout, ignore
* the timeout of the CP_IRQ wait itself.
6137 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6138 ret = hdcp2_detect_msg_availability(intel_dig_port,
6139 msg_id, &msg_ready);
6145 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6146 hdcp2_msg_data->msg_id, ret, timeout);
6151 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6155 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6156 if (hdcp2_msg_data[i].msg_id == msg_id)
6157 return &hdcp2_msg_data[i];
6163 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6164 void *buf, size_t size)
6166 struct intel_dp *dp = &intel_dig_port->dp;
6167 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6168 unsigned int offset;
6170 ssize_t ret, bytes_to_write, len;
6171 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6173 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6174 if (!hdcp2_msg_data)
6177 offset = hdcp2_msg_data->offset;
6179 /* No msg_id in DP HDCP2.2 msgs */
6180 bytes_to_write = size - 1;
6183 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6185 while (bytes_to_write) {
6186 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6187 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
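/*
 * e.g. the 128-byte E_kpub_km payload of AKE_No_Stored_km goes out
 * as eight full 16-byte (DP_AUX_MAX_PAYLOAD_BYTES) writes.
 */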
6189 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6190 offset, (void *)byte, len);
6194 bytes_to_write -= ret;
6203 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6205 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6209 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6210 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6211 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6212 if (ret != HDCP_2_2_RXINFO_LEN)
6213 return ret >= 0 ? -EIO : ret;
6215 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6216 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6218 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6219 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6221 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6222 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6223 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
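/*
 * e.g. rx_info reporting dev_cnt = 3 keeps the fixed part of the
 * message and counts only 3 * HDCP_2_2_RECEIVER_ID_LEN = 15 bytes
 * of the receiver ID array instead of the maximum-sized array.
 */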
6229 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6230 u8 msg_id, void *buf, size_t size)
6232 unsigned int offset;
6234 ssize_t ret, bytes_to_recv, len;
6235 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6237 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6238 if (!hdcp2_msg_data)
6240 offset = hdcp2_msg_data->offset;
6242 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6246 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6247 ret = get_receiver_id_list_size(intel_dig_port);
6253 bytes_to_recv = size - 1;
/* DP adaptation msgs have no msg_id */
6258 while (bytes_to_recv) {
6259 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6260 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6262 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6265 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6269 bytes_to_recv -= ret;
6280 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6281 bool is_repeater, u8 content_type)
6283 struct hdcp2_dp_errata_stream_type stream_type_msg;
* Errata for DP: as the stream type is used for encryption, the
* receiver should be told the stream type so it can decrypt the
* content. A repeater will be told the stream type as part of its
* authentication later in time.
6295 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6296 stream_type_msg.stream_type = content_type;
6298 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6299 sizeof(stream_type_msg));
6303 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6308 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6312 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6313 ret = HDCP_REAUTH_REQUEST;
6314 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6315 ret = HDCP_LINK_INTEGRITY_FAILURE;
6316 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6317 ret = HDCP_TOPOLOGY_CHANGE;
6323 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6330 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6331 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6332 rx_caps, HDCP_2_2_RXCAPS_LEN);
6333 if (ret != HDCP_2_2_RXCAPS_LEN)
6334 return ret >= 0 ? -EIO : ret;
6336 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6337 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6343 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6344 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6345 .read_bksv = intel_dp_hdcp_read_bksv,
6346 .read_bstatus = intel_dp_hdcp_read_bstatus,
6347 .repeater_present = intel_dp_hdcp_repeater_present,
6348 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6349 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6350 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6351 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6352 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6353 .check_link = intel_dp_hdcp_check_link,
6354 .hdcp_capable = intel_dp_hdcp_capable,
6355 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6356 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6357 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6358 .check_2_2_link = intel_dp_hdcp2_check_link,
6359 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6360 .protocol = HDCP_PROTOCOL_DP,
6363 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6365 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6366 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6368 lockdep_assert_held(&dev_priv->pps_mutex);
6370 if (!edp_have_panel_vdd(intel_dp))
6374 * The VDD bit needs a power domain reference, so if the bit is
6375 * already enabled when we boot or resume, grab this reference and
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
6379 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6380 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6382 edp_panel_vdd_schedule_off(intel_dp);
6385 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6387 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6388 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6391 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6392 encoder->port, &pipe))
6395 return INVALID_PIPE;
6398 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6400 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6401 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6402 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6403 intel_wakeref_t wakeref;
6405 if (!HAS_DDI(dev_priv))
6406 intel_dp->DP = I915_READ(intel_dp->output_reg);
6409 lspcon_resume(lspcon);
6411 intel_dp->reset_link_params = true;
6413 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6414 !intel_dp_is_edp(intel_dp))
6417 with_pps_lock(intel_dp, wakeref) {
6418 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6419 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6421 if (intel_dp_is_edp(intel_dp)) {
6423 * Reinit the power sequencer, in case BIOS did
6424 * something nasty with it.
6426 intel_dp_pps_init(intel_dp);
6427 intel_edp_panel_vdd_sanitize(intel_dp);
6432 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6433 .force = intel_dp_force,
6434 .fill_modes = drm_helper_probe_single_connector_modes,
6435 .atomic_get_property = intel_digital_connector_atomic_get_property,
6436 .atomic_set_property = intel_digital_connector_atomic_set_property,
6437 .late_register = intel_dp_connector_register,
6438 .early_unregister = intel_dp_connector_unregister,
6439 .destroy = intel_connector_destroy,
6440 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6441 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6444 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6445 .detect_ctx = intel_dp_detect,
6446 .get_modes = intel_dp_get_modes,
6447 .mode_valid = intel_dp_mode_valid,
6448 .atomic_check = intel_digital_connector_atomic_check,
6451 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6452 .reset = intel_dp_encoder_reset,
6453 .destroy = intel_dp_encoder_destroy,
6457 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6459 struct intel_dp *intel_dp = &intel_dig_port->dp;
6461 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6463 * vdd off can generate a long pulse on eDP which
6464 * would require vdd on to handle it, and thus we
6465 * would end up in an endless cycle of
6466 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6468 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6469 port_name(intel_dig_port->base.port));
6473 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6474 port_name(intel_dig_port->base.port),
6475 long_hpd ? "long" : "short");
6478 intel_dp->reset_link_params = true;
6482 if (intel_dp->is_mst) {
6483 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6485 * If we were in MST mode, and device is not
6486 * there, get out of MST mode
6488 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6489 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6490 intel_dp->is_mst = false;
6491 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6498 if (!intel_dp->is_mst) {
6501 handled = intel_dp_short_pulse(intel_dp);
6510 /* check the VBT to see whether the eDP is on another port */
6511 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6514 * eDP is not supported on g4x, so bail out early just
6515 * for a bit of extra safety in case the VBT is bonkers.
6517 if (INTEL_GEN(dev_priv) < 5)
6520 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6523 return intel_bios_is_port_edp(dev_priv, port);
6527 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6529 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6530 enum port port = dp_to_dig_port(intel_dp)->base.port;
6532 if (!IS_G4X(dev_priv) && port != PORT_A)
6533 intel_attach_force_audio_property(connector);
6535 intel_attach_broadcast_rgb_property(connector);
6536 if (HAS_GMCH(dev_priv))
6537 drm_connector_attach_max_bpc_property(connector, 6, 10);
6538 else if (INTEL_GEN(dev_priv) >= 5)
6539 drm_connector_attach_max_bpc_property(connector, 6, 12);
6541 if (intel_dp_is_edp(intel_dp)) {
6542 u32 allowed_scalers;
6544 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6545 if (!HAS_GMCH(dev_priv))
6546 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6548 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6550 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6555 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6557 intel_dp->panel_power_off_time = ktime_get_boottime();
6558 intel_dp->last_power_on = jiffies;
6559 intel_dp->last_backlight_off = jiffies;
6563 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6565 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6566 u32 pp_on, pp_off, pp_ctl;
6567 struct pps_registers regs;
6569 intel_pps_get_registers(intel_dp, &regs);
6571 pp_ctl = ironlake_get_pp_control(intel_dp);
6573 /* Ensure PPS is unlocked */
6574 if (!HAS_DDI(dev_priv))
6575 I915_WRITE(regs.pp_ctrl, pp_ctl);
6577 pp_on = I915_READ(regs.pp_on);
6578 pp_off = I915_READ(regs.pp_off);
6580 /* Pull timing values out of registers */
6581 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6582 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6583 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6584 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6586 if (i915_mmio_reg_valid(regs.pp_div)) {
6589 pp_div = I915_READ(regs.pp_div);
6591 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6593 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
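/*
 * Worked example (sketch): the power cycle field is kept in 100ms units
 * by the hw, while the sw tracking above uses 100us units, hence the
 * "* 1000". A raw field value of 5 therefore reads back as
 * 5 * 1000 = 5000, i.e. 500ms.
 */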
6598 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6600 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6602 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6606 intel_pps_verify_state(struct intel_dp *intel_dp)
6608 struct edp_power_seq hw;
6609 struct edp_power_seq *sw = &intel_dp->pps_delays;
6611 intel_pps_readout_hw_state(intel_dp, &hw);
6613 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6614 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6615 DRM_ERROR("PPS state mismatch\n");
6616 intel_pps_dump_state("sw", sw);
6617 intel_pps_dump_state("hw", &hw);
6622 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6624 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6625 struct edp_power_seq cur, vbt, spec,
6626 *final = &intel_dp->pps_delays;
6628 lockdep_assert_held(&dev_priv->pps_mutex);
6630 /* already initialized? */
6631 if (final->t11_t12 != 0)
6634 intel_pps_readout_hw_state(intel_dp, &cur);
6636 intel_pps_dump_state("cur", &cur);
6638 vbt = dev_priv->vbt.edp.pps;
6639 /* On the Toshiba Satellite P50-C-18C the VBT T12 delay
6640 * of 500ms appears to be too short. Occasionally the panel
6641 * just fails to power back on. Increasing the delay to 800ms
6642 * seems sufficient to avoid this problem.
6644 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6645 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6646 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6649 /* T11_T12 delay is special and actually in units of 100ms, but zero
6650 * based in the hw (so we need to add 100 ms). But the sw vbt
6651 * table multiplies it by 1000 to make it in units of 100usec, too.
6653 vbt.t11_t12 += 100 * 10;
6655 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6656 * our hw here, which are all in 100usec. */
6657 spec.t1_t3 = 210 * 10;
6658 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6659 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6660 spec.t10 = 500 * 10;
6661 /* This one is special and actually in units of 100ms, but zero
6662 * based in the hw (so we need to add 100 ms). But the sw vbt
6663 * table multiplies it by 1000 to make it in units of 100usec, too.
6665 spec.t11_t12 = (510 + 100) * 10;
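/*
 * Worked example (sketch) of the unit bookkeeping above: all sw delays
 * are kept in 100us units. A VBT T12 of 500ms arrives here as 5000
 * (500ms is 5000 * 100us), the 100ms zero-base correction adds
 * 100 * 10 = 1000, giving vbt.t11_t12 = 6000, i.e. 600ms. The spec
 * limit works out the same way: (510 + 100) * 10 = 6100, i.e. 610ms.
 */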
6667 intel_pps_dump_state("vbt", &vbt);
6669 /* Use the max of the register settings and vbt. If both are
6670 * unset, fall back to the spec limits. */
6671 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
6673 max(cur.field, vbt.field))
6674 assign_final(t1_t3);
6678 assign_final(t11_t12);
6681 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
6682 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6683 intel_dp->backlight_on_delay = get_delay(t8);
6684 intel_dp->backlight_off_delay = get_delay(t9);
6685 intel_dp->panel_power_down_delay = get_delay(t10);
6686 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6689 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6690 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6691 intel_dp->panel_power_cycle_delay);
6693 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6694 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6697 * We override the HW backlight delays to 1 because we do manual waits
6698 * on them. For T8, even BSpec recommends doing it. For T9, if we
6699 * don't do this, we'll end up waiting for the backlight off delay
6700 * twice: once when we do the manual sleep, and once when we disable
6701 * the panel and wait for the PP_STATUS bit to become zero.
6707 * HW has only a 100msec granularity for t11_t12 so round it up accordingly.
6710 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
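/*
 * Worked example (sketch) for the conversions above: get_delay() turns
 * the 100us-unit bookkeeping into milliseconds, so final->t1_t3 = 2100
 * becomes a panel_power_up_delay of DIV_ROUND_UP(2100, 10) = 210ms.
 * The roundup() keeps t11_t12 on the hw's 100ms grid: a value of 6100
 * (610ms) becomes roundup(6100, 1000) = 7000 (700ms).
 */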
6714 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6715 bool force_disable_vdd)
6717 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6718 u32 pp_on, pp_off, port_sel = 0;
6719 int div = dev_priv->rawclk_freq / 1000;
6720 struct pps_registers regs;
6721 enum port port = dp_to_dig_port(intel_dp)->base.port;
6722 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6724 lockdep_assert_held(&dev_priv->pps_mutex);
6726 intel_pps_get_registers(intel_dp, &regs);
6729 * On some VLV machines the BIOS can leave the VDD
6730 * enabled even on power sequencers which aren't
6731 * hooked up to any port. This would mess up the
6732 * power domain tracking the first time we pick
6733 * one of these power sequencers for use since
6734 * edp_panel_vdd_on() would notice that the VDD was
6735 * already on and therefore wouldn't grab the power
6736 * domain reference. Disable VDD first to avoid this.
6737 * This also avoids spuriously turning the VDD on as
6738 * soon as the new power sequencer gets initialized.
6740 if (force_disable_vdd) {
6741 u32 pp = ironlake_get_pp_control(intel_dp);
6743 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6745 if (pp & EDP_FORCE_VDD)
6746 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6748 pp &= ~EDP_FORCE_VDD;
6750 I915_WRITE(regs.pp_ctrl, pp);
6753 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6754 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6755 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6756 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6758 /* Haswell doesn't have any port selection bits for the panel
6759 * power sequencer any more. */
6760 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6761 port_sel = PANEL_PORT_SELECT_VLV(port);
6762 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6765 port_sel = PANEL_PORT_SELECT_DPA;
6768 port_sel = PANEL_PORT_SELECT_DPC;
6771 port_sel = PANEL_PORT_SELECT_DPD;
6781 I915_WRITE(regs.pp_on, pp_on);
6782 I915_WRITE(regs.pp_off, pp_off);
6785 * Compute the divisor for the pp clock; it simply matches the Bspec formula.
6787 if (i915_mmio_reg_valid(regs.pp_div)) {
6788 I915_WRITE(regs.pp_div,
6789 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6790 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
6794 pp_ctl = I915_READ(regs.pp_ctrl);
6795 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6796 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6797 I915_WRITE(regs.pp_ctrl, pp_ctl);
6800 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6801 I915_READ(regs.pp_on),
6802 I915_READ(regs.pp_off),
6803 i915_mmio_reg_valid(regs.pp_div) ?
6804 I915_READ(regs.pp_div) :
6805 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
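/*
 * Worked example (sketch) of the divisor math above: with a raw clock of
 * 24000 kHz, div = 24000 / 1000 = 24 and the reference divider field is
 * (100 * 24) / 2 - 1 = 1199, while a t11_t12 of 7000 (100us units) is
 * programmed as DIV_ROUND_UP(7000, 1000) = 7 in the 100ms-unit power
 * cycle field.
 */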
6808 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6810 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6812 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6813 vlv_initial_power_sequencer_setup(intel_dp);
6815 intel_dp_init_panel_power_sequencer(intel_dp);
6816 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6821 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6822 * @dev_priv: i915 device
6823 * @crtc_state: a pointer to the active intel_crtc_state
6824 * @refresh_rate: RR to be programmed
6826 * This function gets called when refresh rate (RR) has to be changed from
6827 * one frequency to another. Switches can be between high and low RR
6828 * supported by the panel or to any other RR based on media playback (in
6829 * this case, RR value needs to be passed from user space).
6831 * The caller of this function needs to hold dev_priv->drrs.mutex.
6833 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6834 const struct intel_crtc_state *crtc_state,
6837 struct intel_encoder *encoder;
6838 struct intel_digital_port *dig_port = NULL;
6839 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6840 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6841 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6843 if (refresh_rate <= 0) {
6844 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6848 if (intel_dp == NULL) {
6849 DRM_DEBUG_KMS("DRRS not supported.\n");
6853 dig_port = dp_to_dig_port(intel_dp);
6854 encoder = &dig_port->base;
6857 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6861 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6862 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6866 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6868 index = DRRS_LOW_RR;
6870 if (index == dev_priv->drrs.refresh_rate_type) {
6872 "DRRS requested for previously set RR...ignoring\n");
6876 if (!crtc_state->base.active) {
6877 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6881 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6884 intel_dp_set_m_n(crtc_state, M1_N1);
6887 intel_dp_set_m_n(crtc_state, M2_N2);
6891 DRM_ERROR("Unsupported refreshrate type\n");
6893 } else if (INTEL_GEN(dev_priv) > 6) {
6894 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6897 val = I915_READ(reg);
6898 if (index > DRRS_HIGH_RR) {
6899 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6900 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6902 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6904 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6905 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6907 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6909 I915_WRITE(reg, val);
6912 dev_priv->drrs.refresh_rate_type = index;
6914 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6918 * intel_edp_drrs_enable - init drrs struct if supported
6919 * @intel_dp: DP struct
6920 * @crtc_state: A pointer to the active crtc state.
6922 * Initializes frontbuffer_bits and drrs.dp
6924 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6925 const struct intel_crtc_state *crtc_state)
6927 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6929 if (!crtc_state->has_drrs) {
6930 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6934 if (dev_priv->psr.enabled) {
6935 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6939 mutex_lock(&dev_priv->drrs.mutex);
6940 if (dev_priv->drrs.dp) {
6941 DRM_DEBUG_KMS("DRRS already enabled\n");
6945 dev_priv->drrs.busy_frontbuffer_bits = 0;
6947 dev_priv->drrs.dp = intel_dp;
6950 mutex_unlock(&dev_priv->drrs.mutex);
6954 * intel_edp_drrs_disable - Disable DRRS
6955 * @intel_dp: DP struct
6956 * @old_crtc_state: Pointer to old crtc_state.
6959 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6960 const struct intel_crtc_state *old_crtc_state)
6962 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6964 if (!old_crtc_state->has_drrs)
6967 mutex_lock(&dev_priv->drrs.mutex);
6968 if (!dev_priv->drrs.dp) {
6969 mutex_unlock(&dev_priv->drrs.mutex);
6973 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6974 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6975 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6977 dev_priv->drrs.dp = NULL;
6978 mutex_unlock(&dev_priv->drrs.mutex);
6980 cancel_delayed_work_sync(&dev_priv->drrs.work);
6983 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6985 struct drm_i915_private *dev_priv =
6986 container_of(work, typeof(*dev_priv), drrs.work.work);
6987 struct intel_dp *intel_dp;
6989 mutex_lock(&dev_priv->drrs.mutex);
6991 intel_dp = dev_priv->drrs.dp;
6997 * The delayed work can race with an invalidate, hence we need to recheck here.
7001 if (dev_priv->drrs.busy_frontbuffer_bits)
7004 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7005 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7007 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7008 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
7012 mutex_unlock(&dev_priv->drrs.mutex);
7016 * intel_edp_drrs_invalidate - Disable Idleness DRRS
7017 * @dev_priv: i915 device
7018 * @frontbuffer_bits: frontbuffer plane tracking bits
7020 * This function gets called every time rendering on the given planes starts.
7021 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
7023 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7025 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7026 unsigned int frontbuffer_bits)
7028 struct drm_crtc *crtc;
7031 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7034 cancel_delayed_work(&dev_priv->drrs.work);
7036 mutex_lock(&dev_priv->drrs.mutex);
7037 if (!dev_priv->drrs.dp) {
7038 mutex_unlock(&dev_priv->drrs.mutex);
7042 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7043 pipe = to_intel_crtc(crtc)->pipe;
7045 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7046 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7048 /* invalidate means busy screen hence upclock */
7049 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7050 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7051 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
7053 mutex_unlock(&dev_priv->drrs.mutex);
7057 * intel_edp_drrs_flush - Restart Idleness DRRS
7058 * @dev_priv: i915 device
7059 * @frontbuffer_bits: frontbuffer plane tracking bits
7061 * This function gets called every time rendering on the given planes has
7062 * completed or a flip on a crtc has completed. So DRRS should be upclocked
7063 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted
7064 * if no other planes are dirty.
7066 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7068 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7069 unsigned int frontbuffer_bits)
7071 struct drm_crtc *crtc;
7074 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7077 cancel_delayed_work(&dev_priv->drrs.work);
7079 mutex_lock(&dev_priv->drrs.mutex);
7080 if (!dev_priv->drrs.dp) {
7081 mutex_unlock(&dev_priv->drrs.mutex);
7085 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7086 pipe = to_intel_crtc(crtc)->pipe;
7088 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7089 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7091 /* flush means busy screen hence upclock */
7092 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7093 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7094 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
7097 * flush also means no more activity hence schedule downclock, if all
7098 * other fbs are quiescent too
7100 if (!dev_priv->drrs.busy_frontbuffer_bits)
7101 schedule_delayed_work(&dev_priv->drrs.work,
7102 msecs_to_jiffies(1000));
7103 mutex_unlock(&dev_priv->drrs.mutex);
7107 * DOC: Display Refresh Rate Switching (DRRS)
7109 * Display Refresh Rate Switching (DRRS) is a power conservation feature
7110 * which enables switching between low and high refresh rates,
7111 * dynamically, based on the usage scenario. This feature is applicable
7112 * for internal panels.
7114 * Indication that the panel supports DRRS is given by the panel EDID, which
7115 * would list multiple refresh rates for one resolution.
7117 * DRRS is of two types: static and seamless.
7118 * Static DRRS involves changing the refresh rate (RR) via a full modeset
7119 * (may appear as a blink on screen) and is used in dock/undock scenarios.
7120 * Seamless DRRS involves changing RR without any visual effect to the user
7121 * and can be used during normal system usage. This is done by programming
7122 * certain registers.
7124 * Support for static/seamless DRRS may be indicated in the VBT based on
7125 * inputs from the panel spec.
7127 * DRRS saves power by switching to low RR based on usage scenarios.
7129 * The implementation is based on frontbuffer tracking implementation. When
7130 * there is a disturbance on the screen triggered by user activity or a periodic
7131 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7132 * no movement on screen, after a timeout of 1 second, a switch to low RR is made.
7135 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7136 * and intel_edp_drrs_flush() are called.
7138 * DRRS can be further extended to support other internal panels and also
7139 * the scenario of video playback wherein RR is set based on the rate
7140 * requested by userspace.
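/*
 * Integration sketch (illustrative, simplified from the frontbuffer
 * tracking code): a rendering event bumps the panel back to high RR and
 * a flush re-arms the 1 second idleness timer, roughly:
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... CPU/GPU rendering to the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * where frontbuffer_bits identifies the planes touched; see the
 * frontbuffer tracking code for the real call sites.
 */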
7144 * intel_dp_drrs_init - Init basic DRRS work and mutex.
7145 * @connector: eDP connector
7146 * @fixed_mode: preferred mode of panel
7148 * This function is called only once at driver load to initialize basic
7152 * DRRS state. Returns the downclock mode if the panel supports it, else NULL.
7153 * DRRS support is determined by the presence of downclock mode (apart
7154 * from VBT setting).
7156 static struct drm_display_mode *
7157 intel_dp_drrs_init(struct intel_connector *connector,
7158 struct drm_display_mode *fixed_mode)
7160 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7161 struct drm_display_mode *downclock_mode = NULL;
7163 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7164 mutex_init(&dev_priv->drrs.mutex);
7166 if (INTEL_GEN(dev_priv) <= 6) {
7167 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
7171 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7172 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
7176 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7177 if (!downclock_mode) {
7178 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
7182 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7184 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7185 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
7186 return downclock_mode;
7189 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7190 struct intel_connector *intel_connector)
7192 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7193 struct drm_device *dev = &dev_priv->drm;
7194 struct drm_connector *connector = &intel_connector->base;
7195 struct drm_display_mode *fixed_mode = NULL;
7196 struct drm_display_mode *downclock_mode = NULL;
7198 enum pipe pipe = INVALID_PIPE;
7199 intel_wakeref_t wakeref;
7202 if (!intel_dp_is_edp(intel_dp))
7205 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7208 * On IBX/CPT we may get here with LVDS already registered. Since the
7209 * driver uses the only internal power sequencer available for both
7210 * eDP and LVDS, bail out early in this case to prevent interfering
7211 * with an already powered-on LVDS power sequencer.
7213 if (intel_get_lvds_encoder(dev_priv)) {
7214 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7215 DRM_INFO("LVDS was detected, not registering eDP\n");
7220 with_pps_lock(intel_dp, wakeref) {
7221 intel_dp_init_panel_power_timestamps(intel_dp);
7222 intel_dp_pps_init(intel_dp);
7223 intel_edp_panel_vdd_sanitize(intel_dp);
7226 /* Cache DPCD and EDID for edp. */
7227 has_dpcd = intel_edp_init_dpcd(intel_dp);
7230 /* if this fails, presume the device is a ghost */
7231 DRM_INFO("failed to retrieve link info, disabling eDP\n");
7235 mutex_lock(&dev->mode_config.mutex);
7236 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7238 if (drm_add_edid_modes(connector, edid)) {
7239 drm_connector_update_edid_property(connector,
7243 edid = ERR_PTR(-EINVAL);
7246 edid = ERR_PTR(-ENOENT);
7248 intel_connector->edid = edid;
7250 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7252 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7254 /* fallback to VBT if available for eDP */
7256 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7257 mutex_unlock(&dev->mode_config.mutex);
7259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7260 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7261 register_reboot_notifier(&intel_dp->edp_notifier);
7264 * Figure out the current pipe for the initial backlight setup.
7265 * If the current pipe isn't valid, try the PPS pipe, and if that
7266 * fails just assume pipe A.
7268 pipe = vlv_active_pipe(intel_dp);
7270 if (pipe != PIPE_A && pipe != PIPE_B)
7271 pipe = intel_dp->pps_pipe;
7273 if (pipe != PIPE_A && pipe != PIPE_B)
7276 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
7280 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7281 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7282 intel_panel_setup_backlight(connector, pipe);
7285 drm_connector_init_panel_orientation_property(
7286 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
7291 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7293 * vdd might still be enabled due to the delayed vdd off.
7294 * Make sure vdd is actually turned off here.
7296 with_pps_lock(intel_dp, wakeref)
7297 edp_panel_vdd_off_sync(intel_dp);
7302 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7304 struct intel_connector *intel_connector;
7305 struct drm_connector *connector;
7307 intel_connector = container_of(work, typeof(*intel_connector),
7308 modeset_retry_work);
7309 connector = &intel_connector->base;
7310 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7313 /* Grab the locks before changing connector property */
7314 mutex_lock(&connector->dev->mode_config.mutex);
7315 /* Set connector link status to BAD and send a Uevent to notify
7316 * userspace to do a modeset.
7318 drm_connector_set_link_status_property(connector,
7319 DRM_MODE_LINK_STATUS_BAD);
7320 mutex_unlock(&connector->dev->mode_config.mutex);
7321 /* Send Hotplug uevent so userspace can reprobe */
7322 drm_kms_helper_hotplug_event(connector->dev);
7326 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7327 struct intel_connector *intel_connector)
7329 struct drm_connector *connector = &intel_connector->base;
7330 struct intel_dp *intel_dp = &intel_dig_port->dp;
7331 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7332 struct drm_device *dev = intel_encoder->base.dev;
7333 struct drm_i915_private *dev_priv = to_i915(dev);
7334 enum port port = intel_encoder->port;
7337 /* Initialize the work for modeset in case of link train failure */
7338 INIT_WORK(&intel_connector->modeset_retry_work,
7339 intel_dp_modeset_retry_work_fn);
7341 if (WARN(intel_dig_port->max_lanes < 1,
7342 "Not enough lanes (%d) for DP on port %c\n",
7343 intel_dig_port->max_lanes, port_name(port)))
7346 intel_dp_set_source_rates(intel_dp);
7348 intel_dp->reset_link_params = true;
7349 intel_dp->pps_pipe = INVALID_PIPE;
7350 intel_dp->active_pipe = INVALID_PIPE;
7352 /* Preserve the current hw state. */
7353 intel_dp->DP = I915_READ(intel_dp->output_reg);
7354 intel_dp->attached_connector = intel_connector;
7356 if (intel_dp_is_port_edp(dev_priv, port)) {
7358 * Currently we don't support eDP on TypeC ports, although in
7359 * theory it could work on TypeC legacy ports.
7361 WARN_ON(intel_port_is_tc(dev_priv, port));
7362 type = DRM_MODE_CONNECTOR_eDP;
7364 type = DRM_MODE_CONNECTOR_DisplayPort;
7367 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7368 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7371 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7372 * for DP the encoder type can be set by the caller to
7373 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7375 if (type == DRM_MODE_CONNECTOR_eDP)
7376 intel_encoder->type = INTEL_OUTPUT_EDP;
7378 /* eDP only on port B and/or C on vlv/chv */
7379 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7380 intel_dp_is_edp(intel_dp) &&
7381 port != PORT_B && port != PORT_C))
7384 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
7385 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7388 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7389 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7391 if (!HAS_GMCH(dev_priv))
7392 connector->interlace_allowed = true;
7393 connector->doublescan_allowed = 0;
7395 if (INTEL_GEN(dev_priv) >= 11)
7396 connector->ycbcr_420_allowed = true;
7398 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7400 intel_dp_aux_init(intel_dp);
7402 intel_connector_attach_encoder(intel_connector, intel_encoder);
7404 if (HAS_DDI(dev_priv))
7405 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7407 intel_connector->get_hw_state = intel_connector_get_hw_state;
7409 /* init MST on ports that can support it */
7410 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
7411 (port == PORT_B || port == PORT_C ||
7412 port == PORT_D || port == PORT_F))
7413 intel_dp_mst_encoder_init(intel_dig_port,
7414 intel_connector->base.base.id);
7416 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7417 intel_dp_aux_fini(intel_dp);
7418 intel_dp_mst_encoder_cleanup(intel_dig_port);
7422 intel_dp_add_properties(intel_dp, connector);
7424 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7425 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7427 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
7430 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
7431 * written as 0xd. Failing to do so results in spurious interrupts being
7432 * generated on the port when a cable is not attached.
7434 if (IS_G45(dev_priv)) {
7435 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
7436 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
7442 drm_connector_cleanup(connector);
7447 bool intel_dp_init(struct drm_i915_private *dev_priv,
7448 i915_reg_t output_reg,
7451 struct intel_digital_port *intel_dig_port;
7452 struct intel_encoder *intel_encoder;
7453 struct drm_encoder *encoder;
7454 struct intel_connector *intel_connector;
7456 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7457 if (!intel_dig_port)
7460 intel_connector = intel_connector_alloc();
7461 if (!intel_connector)
7462 goto err_connector_alloc;
7464 intel_encoder = &intel_dig_port->base;
7465 encoder = &intel_encoder->base;
7467 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7468 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7469 "DP %c", port_name(port)))
7470 goto err_encoder_init;
7472 intel_encoder->hotplug = intel_dp_hotplug;
7473 intel_encoder->compute_config = intel_dp_compute_config;
7474 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7475 intel_encoder->get_config = intel_dp_get_config;
7476 intel_encoder->update_pipe = intel_panel_update_backlight;
7477 intel_encoder->suspend = intel_dp_encoder_suspend;
7478 if (IS_CHERRYVIEW(dev_priv)) {
7479 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7480 intel_encoder->pre_enable = chv_pre_enable_dp;
7481 intel_encoder->enable = vlv_enable_dp;
7482 intel_encoder->disable = vlv_disable_dp;
7483 intel_encoder->post_disable = chv_post_disable_dp;
7484 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7485 } else if (IS_VALLEYVIEW(dev_priv)) {
7486 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7487 intel_encoder->pre_enable = vlv_pre_enable_dp;
7488 intel_encoder->enable = vlv_enable_dp;
7489 intel_encoder->disable = vlv_disable_dp;
7490 intel_encoder->post_disable = vlv_post_disable_dp;
7492 intel_encoder->pre_enable = g4x_pre_enable_dp;
7493 intel_encoder->enable = g4x_enable_dp;
7494 intel_encoder->disable = g4x_disable_dp;
7495 intel_encoder->post_disable = g4x_post_disable_dp;
7498 intel_dig_port->dp.output_reg = output_reg;
7499 intel_dig_port->max_lanes = 4;
7501 intel_encoder->type = INTEL_OUTPUT_DP;
7502 intel_encoder->power_domain = intel_port_to_power_domain(port);
7503 if (IS_CHERRYVIEW(dev_priv)) {
7505 intel_encoder->crtc_mask = 1 << 2;
7507 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7509 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7511 intel_encoder->cloneable = 0;
7512 intel_encoder->port = port;
7514 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7517 intel_infoframe_init(intel_dig_port);
7519 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7520 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7521 goto err_init_connector;
7526 drm_encoder_cleanup(encoder);
7528 kfree(intel_connector);
7529 err_connector_alloc:
7530 kfree(intel_dig_port);
7534 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7536 struct intel_encoder *encoder;
7538 for_each_intel_encoder(&dev_priv->drm, encoder) {
7539 struct intel_dp *intel_dp;
7541 if (encoder->type != INTEL_OUTPUT_DDI)
7544 intel_dp = enc_to_intel_dp(&encoder->base);
7546 if (!intel_dp->can_mst)
7549 if (intel_dp->is_mst)
7550 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7554 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7556 struct intel_encoder *encoder;
7558 for_each_intel_encoder(&dev_priv->drm, encoder) {
7559 struct intel_dp *intel_dp;
7562 if (encoder->type != INTEL_OUTPUT_DDI)
7565 intel_dp = enc_to_intel_dp(&encoder->base);
7567 if (!intel_dp->can_mst)
7570 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7572 intel_dp->is_mst = false;
7573 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,