2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/*
 * Source link rates, in kHz (i.e. link symbol clock * 10), that the
 * various DP source platforms can drive. Kept sorted ascending so they
 * can be merged with the sink's rate list in intersect_rates().
 */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
/* Standard fixed DP rates: RBR (1.62G), HBR (2.7G), HBR2 (5.4G) */
static const int default_rates[] = { 162000, 270000, 540000 };
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
110 static bool is_edp(struct intel_dp *intel_dp)
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
117 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
121 return intel_dig_port->base.base.dev;
124 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
129 static void intel_dp_link_down(struct intel_dp *intel_dp);
130 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
131 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
132 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133 static void vlv_steal_power_sequencer(struct drm_device *dev,
/*
 * intel_dp_unused_lane_mask - mask of DP lanes not used by the link
 * @lane_count: number of lanes in use (0-4; lanes are used from lane 0 up)
 *
 * Returns a 4-bit mask with one bit set for each of the four possible
 * lanes that is NOT part of an active @lane_count-lane configuration,
 * e.g. lane_count == 1 yields 0xe, lane_count == 4 yields 0x0.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
142 intel_dp_max_link_bw(struct intel_dp *intel_dp)
144 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
146 switch (max_link_bw) {
147 case DP_LINK_BW_1_62:
152 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
154 max_link_bw = DP_LINK_BW_1_62;
160 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = intel_dig_port->base.base.dev;
164 u8 source_max, sink_max;
167 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
168 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
171 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
173 return min(source_max, sink_max);
177 * The units on the numbers in the next two are... bizarre. Examples will
178 * make it clearer; this one parallels an example in the eDP spec.
180 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
182 * 270000 * 1 * 8 / 10 == 216000
184 * The actual data capacity of that configuration is 2.16Gbit/s, so the
185 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
186 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187 * 119000. At 18bpp that's 2142000 kilobits per second.
189 * Thus the strange-looking division by 10 in intel_dp_link_required, to
190 * get the result in decakilobits instead of kilobits.
/*
 * Bandwidth required by a mode, in "decakilobits" (see the unit
 * discussion above): pixel_clock is in kHz, so pixel_clock * bpp is
 * kilobits/s; dividing by 10 (rounding up) yields decakilobits/s,
 * directly comparable with intel_dp_max_data_rate().
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* +9 rounds the division up so we never under-report the need */
	return (pixel_clock * bpp + 9) / 10;
}
/*
 * Maximum data rate of a link, in decakilobits/s.
 * @max_link_clock: link symbol clock in kHz (e.g. 270000 for HBR)
 * @max_lanes: number of lanes
 *
 * The * 8 / 10 accounts for DP's 8b/10b channel coding: only 8 of every
 * 10 transmitted bits carry data. E.g. one 2.7GHz lane:
 * 270000 * 1 * 8 / 10 == 216000 (2.16 Gbit/s).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
205 static enum drm_mode_status
206 intel_dp_mode_valid(struct drm_connector *connector,
207 struct drm_display_mode *mode)
209 struct intel_dp *intel_dp = intel_attached_dp(connector);
210 struct intel_connector *intel_connector = to_intel_connector(connector);
211 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
212 int target_clock = mode->clock;
213 int max_rate, mode_rate, max_lanes, max_link_clock;
215 if (is_edp(intel_dp) && fixed_mode) {
216 if (mode->hdisplay > fixed_mode->hdisplay)
219 if (mode->vdisplay > fixed_mode->vdisplay)
222 target_clock = fixed_mode->clock;
225 max_link_clock = intel_dp_max_link_rate(intel_dp);
226 max_lanes = intel_dp_max_lane_count(intel_dp);
228 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
229 mode_rate = intel_dp_link_required(target_clock, 18);
231 if (mode_rate > max_rate)
232 return MODE_CLOCK_HIGH;
234 if (mode->clock < 10000)
235 return MODE_CLOCK_LOW;
237 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
238 return MODE_H_ILLEGAL;
/*
 * Pack up to 4 bytes of an AUX message into one 32-bit data-register
 * value, MSB first (src[0] ends up in bits 31:24). Extra bytes beyond
 * 4 are ignored; missing bytes are left as zero.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack one 32-bit AUX data-register value into up to 4 bytes,
 * MSB first (bits 31:24 become dst[0]). The inverse of
 * intel_dp_pack_aux(); at most 4 bytes are written.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
265 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
266 struct intel_dp *intel_dp);
268 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
269 struct intel_dp *intel_dp);
271 static void pps_lock(struct intel_dp *intel_dp)
273 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
274 struct intel_encoder *encoder = &intel_dig_port->base;
275 struct drm_device *dev = encoder->base.dev;
276 struct drm_i915_private *dev_priv = dev->dev_private;
277 enum intel_display_power_domain power_domain;
280 * See vlv_power_sequencer_reset() why we need
281 * a power domain reference here.
283 power_domain = intel_display_port_power_domain(encoder);
284 intel_display_power_get(dev_priv, power_domain);
286 mutex_lock(&dev_priv->pps_mutex);
289 static void pps_unlock(struct intel_dp *intel_dp)
291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
292 struct intel_encoder *encoder = &intel_dig_port->base;
293 struct drm_device *dev = encoder->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 enum intel_display_power_domain power_domain;
297 mutex_unlock(&dev_priv->pps_mutex);
299 power_domain = intel_display_port_power_domain(encoder);
300 intel_display_power_put(dev_priv, power_domain);
304 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
306 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
307 struct drm_device *dev = intel_dig_port->base.base.dev;
308 struct drm_i915_private *dev_priv = dev->dev_private;
309 enum pipe pipe = intel_dp->pps_pipe;
310 bool pll_enabled, release_cl_override = false;
311 enum dpio_phy phy = DPIO_PHY(pipe);
312 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
315 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
316 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
317 pipe_name(pipe), port_name(intel_dig_port->port)))
320 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
321 pipe_name(pipe), port_name(intel_dig_port->port));
323 /* Preserve the BIOS-computed detected bit. This is
324 * supposed to be read-only.
326 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
327 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
328 DP |= DP_PORT_WIDTH(1);
329 DP |= DP_LINK_TRAIN_PAT_1;
331 if (IS_CHERRYVIEW(dev))
332 DP |= DP_PIPE_SELECT_CHV(pipe);
333 else if (pipe == PIPE_B)
334 DP |= DP_PIPEB_SELECT;
336 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
339 * The DPLL for the pipe must be enabled for this to work.
340 * So enable temporarily it if it's not already enabled.
343 release_cl_override = IS_CHERRYVIEW(dev) &&
344 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
346 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
347 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
351 * Similar magic as in intel_dp_enable_port().
352 * We _must_ do this port enable + disable trick
353 * to make this power seqeuencer lock onto the port.
354 * Otherwise even VDD force bit won't work.
356 I915_WRITE(intel_dp->output_reg, DP);
357 POSTING_READ(intel_dp->output_reg);
359 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
362 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
363 POSTING_READ(intel_dp->output_reg);
366 vlv_force_pll_off(dev, pipe);
368 if (release_cl_override)
369 chv_phy_powergate_ch(dev_priv, phy, ch, false);
374 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
376 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
377 struct drm_device *dev = intel_dig_port->base.base.dev;
378 struct drm_i915_private *dev_priv = dev->dev_private;
379 struct intel_encoder *encoder;
380 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
383 lockdep_assert_held(&dev_priv->pps_mutex);
385 /* We should never land here with regular DP ports */
386 WARN_ON(!is_edp(intel_dp));
388 if (intel_dp->pps_pipe != INVALID_PIPE)
389 return intel_dp->pps_pipe;
392 * We don't have power sequencer currently.
393 * Pick one that's not used by other ports.
395 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
397 struct intel_dp *tmp;
399 if (encoder->type != INTEL_OUTPUT_EDP)
402 tmp = enc_to_intel_dp(&encoder->base);
404 if (tmp->pps_pipe != INVALID_PIPE)
405 pipes &= ~(1 << tmp->pps_pipe);
409 * Didn't find one. This should not happen since there
410 * are two power sequencers and up to two eDP ports.
412 if (WARN_ON(pipes == 0))
415 pipe = ffs(pipes) - 1;
417 vlv_steal_power_sequencer(dev, pipe);
418 intel_dp->pps_pipe = pipe;
420 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
421 pipe_name(intel_dp->pps_pipe),
422 port_name(intel_dig_port->port));
424 /* init power sequencer on this pipe and port */
425 intel_dp_init_panel_power_sequencer(dev, intel_dp);
426 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
429 * Even vdd force doesn't work until we've made
430 * the power sequencer lock in on the port.
432 vlv_power_sequencer_kick(intel_dp);
434 return intel_dp->pps_pipe;
437 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
440 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
443 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
446 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
449 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
452 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
459 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
461 vlv_pipe_check pipe_check)
465 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
466 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
467 PANEL_PORT_SELECT_MASK;
469 if (port_sel != PANEL_PORT_SELECT_VLV(port))
472 if (!pipe_check(dev_priv, pipe))
482 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
484 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
485 struct drm_device *dev = intel_dig_port->base.base.dev;
486 struct drm_i915_private *dev_priv = dev->dev_private;
487 enum port port = intel_dig_port->port;
489 lockdep_assert_held(&dev_priv->pps_mutex);
491 /* try to find a pipe with this port selected */
492 /* first pick one where the panel is on */
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495 /* didn't find one? pick one where vdd is on */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
498 vlv_pipe_has_vdd_on);
499 /* didn't find one? pick one with just the correct port */
500 if (intel_dp->pps_pipe == INVALID_PIPE)
501 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
504 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
505 if (intel_dp->pps_pipe == INVALID_PIPE) {
506 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
511 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
512 port_name(port), pipe_name(intel_dp->pps_pipe));
514 intel_dp_init_panel_power_sequencer(dev, intel_dp);
515 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
518 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
520 struct drm_device *dev = dev_priv->dev;
521 struct intel_encoder *encoder;
523 if (WARN_ON(!IS_VALLEYVIEW(dev)))
527 * We can't grab pps_mutex here due to deadlock with power_domain
528 * mutex when power_domain functions are called while holding pps_mutex.
529 * That also means that in order to use pps_pipe the code needs to
530 * hold both a power domain reference and pps_mutex, and the power domain
531 * reference get/put must be done while _not_ holding pps_mutex.
532 * pps_{lock,unlock}() do these steps in the correct order, so one
533 * should use them always.
536 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
537 struct intel_dp *intel_dp;
539 if (encoder->type != INTEL_OUTPUT_EDP)
542 intel_dp = enc_to_intel_dp(&encoder->base);
543 intel_dp->pps_pipe = INVALID_PIPE;
547 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
552 return BXT_PP_CONTROL(0);
553 else if (HAS_PCH_SPLIT(dev))
554 return PCH_PP_CONTROL;
556 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
559 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
561 struct drm_device *dev = intel_dp_to_dev(intel_dp);
564 return BXT_PP_STATUS(0);
565 else if (HAS_PCH_SPLIT(dev))
566 return PCH_PP_STATUS;
568 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
571 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
572 This function only applicable when panel PM state is not to be tracked */
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
576 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 struct drm_i915_private *dev_priv = dev->dev_private;
581 u32 pp_ctrl_reg, pp_div_reg;
583 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 if (IS_VALLEYVIEW(dev)) {
589 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
591 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
592 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
593 pp_div = I915_READ(pp_div_reg);
594 pp_div &= PP_REFERENCE_DIVIDER_MASK;
596 /* 0x1F write to PP_DIV_REG sets max cycle delay */
597 I915_WRITE(pp_div_reg, pp_div | 0x1F);
598 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
599 msleep(intel_dp->panel_power_cycle_delay);
602 pps_unlock(intel_dp);
607 static bool edp_have_panel_power(struct intel_dp *intel_dp)
609 struct drm_device *dev = intel_dp_to_dev(intel_dp);
610 struct drm_i915_private *dev_priv = dev->dev_private;
612 lockdep_assert_held(&dev_priv->pps_mutex);
614 if (IS_VALLEYVIEW(dev) &&
615 intel_dp->pps_pipe == INVALID_PIPE)
618 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
621 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
623 struct drm_device *dev = intel_dp_to_dev(intel_dp);
624 struct drm_i915_private *dev_priv = dev->dev_private;
626 lockdep_assert_held(&dev_priv->pps_mutex);
628 if (IS_VALLEYVIEW(dev) &&
629 intel_dp->pps_pipe == INVALID_PIPE)
632 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
636 intel_dp_check_edp(struct intel_dp *intel_dp)
638 struct drm_device *dev = intel_dp_to_dev(intel_dp);
639 struct drm_i915_private *dev_priv = dev->dev_private;
641 if (!is_edp(intel_dp))
644 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
645 WARN(1, "eDP powered off while attempting aux channel communication.\n");
646 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
647 I915_READ(_pp_stat_reg(intel_dp)),
648 I915_READ(_pp_ctrl_reg(intel_dp)));
653 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
655 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
656 struct drm_device *dev = intel_dig_port->base.base.dev;
657 struct drm_i915_private *dev_priv = dev->dev_private;
658 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
662 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
664 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
665 msecs_to_jiffies_timeout(10));
667 done = wait_for_atomic(C, 10) == 0;
669 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
676 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
678 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
679 struct drm_device *dev = intel_dig_port->base.base.dev;
682 * The clock divider is based off the hrawclk, and would like to run at
683 * 2MHz. So, take the hrawclk value and divide by 2 and use that
685 return index ? 0 : intel_hrawclk(dev) / 2;
688 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
690 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
691 struct drm_device *dev = intel_dig_port->base.base.dev;
692 struct drm_i915_private *dev_priv = dev->dev_private;
697 if (intel_dig_port->port == PORT_A) {
698 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
701 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
705 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
708 struct drm_device *dev = intel_dig_port->base.base.dev;
709 struct drm_i915_private *dev_priv = dev->dev_private;
711 if (intel_dig_port->port == PORT_A) {
714 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
715 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
716 /* Workaround for non-ULT HSW */
723 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/*
 * AUX clock divider for VLV: fixed divider of 100 (single entry;
 * index != 0 terminates the caller's divider iteration).
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
742 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
745 uint32_t aux_clock_divider)
747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
748 struct drm_device *dev = intel_dig_port->base.base.dev;
749 uint32_t precharge, timeout;
756 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
757 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
759 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
761 return DP_AUX_CH_CTL_SEND_BUSY |
763 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
764 DP_AUX_CH_CTL_TIME_OUT_ERROR |
766 DP_AUX_CH_CTL_RECEIVE_ERROR |
767 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
768 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
769 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
772 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
777 return DP_AUX_CH_CTL_SEND_BUSY |
779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
781 DP_AUX_CH_CTL_TIME_OUT_1600us |
782 DP_AUX_CH_CTL_RECEIVE_ERROR |
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
788 intel_dp_aux_ch(struct intel_dp *intel_dp,
789 const uint8_t *send, int send_bytes,
790 uint8_t *recv, int recv_size)
792 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
793 struct drm_device *dev = intel_dig_port->base.base.dev;
794 struct drm_i915_private *dev_priv = dev->dev_private;
795 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
796 uint32_t ch_data = ch_ctl + 4;
797 uint32_t aux_clock_divider;
798 int i, ret, recv_bytes;
801 bool has_aux_irq = HAS_AUX_IRQ(dev);
807 * We will be called with VDD already enabled for dpcd/edid/oui reads.
808 * In such cases we want to leave VDD enabled and it's up to upper layers
809 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
812 vdd = edp_panel_vdd_on(intel_dp);
814 /* dp aux is extremely sensitive to irq latency, hence request the
815 * lowest possible wakeup latency and so prevent the cpu from going into
818 pm_qos_update_request(&dev_priv->pm_qos, 0);
820 intel_dp_check_edp(intel_dp);
822 intel_aux_display_runtime_get(dev_priv);
824 /* Try to wait for any previous AUX channel activity */
825 for (try = 0; try < 3; try++) {
826 status = I915_READ_NOTRACE(ch_ctl);
827 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
833 static u32 last_status = -1;
834 const u32 status = I915_READ(ch_ctl);
836 if (status != last_status) {
837 WARN(1, "dp_aux_ch not started status 0x%08x\n",
839 last_status = status;
846 /* Only 5 data registers! */
847 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
852 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
853 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
858 /* Must try at least 3 times according to DP spec */
859 for (try = 0; try < 5; try++) {
860 /* Load the send data into the aux channel data registers */
861 for (i = 0; i < send_bytes; i += 4)
862 I915_WRITE(ch_data + i,
863 intel_dp_pack_aux(send + i,
866 /* Send the command and wait for it to complete */
867 I915_WRITE(ch_ctl, send_ctl);
869 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
871 /* Clear done status and any errors */
875 DP_AUX_CH_CTL_TIME_OUT_ERROR |
876 DP_AUX_CH_CTL_RECEIVE_ERROR);
878 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
881 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
882 * 400us delay required for errors and timeouts
883 * Timeout errors from the HW already meet this
884 * requirement so skip to next iteration
886 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
887 usleep_range(400, 500);
890 if (status & DP_AUX_CH_CTL_DONE)
895 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
896 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
902 /* Check for timeout or receive error.
903 * Timeouts occur when the sink is not connected
905 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
906 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
911 /* Timeouts occur when the device isn't connected, so they're
912 * "normal" -- don't fill the kernel log with these */
913 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
914 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
919 /* Unload any bytes sent back from the other side */
920 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
921 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
922 if (recv_bytes > recv_size)
923 recv_bytes = recv_size;
925 for (i = 0; i < recv_bytes; i += 4)
926 intel_dp_unpack_aux(I915_READ(ch_data + i),
927 recv + i, recv_bytes - i);
931 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
932 intel_aux_display_runtime_put(dev_priv);
935 edp_panel_vdd_off(intel_dp, false);
937 pps_unlock(intel_dp);
942 #define BARE_ADDRESS_SIZE 3
943 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
945 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
947 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
948 uint8_t txbuf[20], rxbuf[20];
949 size_t txsize, rxsize;
952 txbuf[0] = (msg->request << 4) |
953 ((msg->address >> 16) & 0xf);
954 txbuf[1] = (msg->address >> 8) & 0xff;
955 txbuf[2] = msg->address & 0xff;
956 txbuf[3] = msg->size - 1;
958 switch (msg->request & ~DP_AUX_I2C_MOT) {
959 case DP_AUX_NATIVE_WRITE:
960 case DP_AUX_I2C_WRITE:
961 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
962 rxsize = 2; /* 0 or 1 data bytes */
964 if (WARN_ON(txsize > 20))
967 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
969 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
971 msg->reply = rxbuf[0] >> 4;
974 /* Number of bytes written in a short write. */
975 ret = clamp_t(int, rxbuf[1], 0, msg->size);
977 /* Return payload size. */
983 case DP_AUX_NATIVE_READ:
984 case DP_AUX_I2C_READ:
985 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
986 rxsize = msg->size + 1;
988 if (WARN_ON(rxsize > 20))
991 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
993 msg->reply = rxbuf[0] >> 4;
995 * Assume happy day, and copy the data. The caller is
996 * expected to check msg->reply before touching it.
998 * Return payload size.
1001 memcpy(msg->buffer, rxbuf + 1, ret);
1014 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1016 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1017 struct drm_i915_private *dev_priv = dev->dev_private;
1018 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1019 enum port port = intel_dig_port->port;
1020 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1021 const char *name = NULL;
1022 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1025 /* On SKL we don't have Aux for port E so we rely on VBT to set
1026 * a proper alternate aux channel.
1028 if (IS_SKYLAKE(dev) && port == PORT_E) {
1029 switch (info->alternate_aux_channel) {
1031 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1034 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1037 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1041 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1047 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1051 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1055 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1059 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1063 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1071 * The AUX_CTL register is usually DP_CTL + 0x10.
1073 * On Haswell and Broadwell though:
1074 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1075 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1077 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1079 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1080 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1082 intel_dp->aux.name = name;
1083 intel_dp->aux.dev = dev->dev;
1084 intel_dp->aux.transfer = intel_dp_aux_transfer;
1086 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1087 connector->base.kdev->kobj.name);
1089 ret = drm_dp_aux_register(&intel_dp->aux);
1091 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1096 ret = sysfs_create_link(&connector->base.kdev->kobj,
1097 &intel_dp->aux.ddc.dev.kobj,
1098 intel_dp->aux.ddc.dev.kobj.name);
1100 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1101 drm_dp_aux_unregister(&intel_dp->aux);
1106 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1108 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1110 if (!intel_connector->mst_port)
1111 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1112 intel_dp->aux.ddc.dev.kobj.name);
1113 intel_connector_unregister(intel_connector);
1117 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1121 memset(&pipe_config->dpll_hw_state, 0,
1122 sizeof(pipe_config->dpll_hw_state));
1124 pipe_config->ddi_pll_sel = SKL_DPLL0;
1125 pipe_config->dpll_hw_state.cfgcr1 = 0;
1126 pipe_config->dpll_hw_state.cfgcr2 = 0;
1128 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1129 switch (pipe_config->port_clock / 2) {
1131 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1135 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1139 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1143 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1146 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1147 results in CDCLK change. Need to handle the change of CDCLK by
1148 disabling pipes and re-enabling them */
1150 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1154 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1159 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1163 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1165 memset(&pipe_config->dpll_hw_state, 0,
1166 sizeof(pipe_config->dpll_hw_state));
1168 switch (pipe_config->port_clock / 2) {
1170 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1173 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1176 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1182 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1184 if (intel_dp->num_sink_rates) {
1185 *sink_rates = intel_dp->sink_rates;
1186 return intel_dp->num_sink_rates;
1189 *sink_rates = default_rates;
1191 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1195 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1197 if (IS_BROXTON(dev)) {
1198 *source_rates = bxt_rates;
1199 return ARRAY_SIZE(bxt_rates);
1200 } else if (IS_SKYLAKE(dev)) {
1201 *source_rates = skl_rates;
1202 return ARRAY_SIZE(skl_rates);
1203 } else if (IS_CHERRYVIEW(dev)) {
1204 *source_rates = chv_rates;
1205 return ARRAY_SIZE(chv_rates);
1208 *source_rates = default_rates;
1210 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1211 /* WaDisableHBR2:skl */
1212 return (DP_LINK_BW_2_7 >> 3) + 1;
1213 else if (INTEL_INFO(dev)->gen >= 8 ||
1214 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1215 return (DP_LINK_BW_5_4 >> 3) + 1;
1217 return (DP_LINK_BW_2_7 >> 3) + 1;
/*
 * For pre-DDI platforms, look up the DPLL divisor values matching
 * pipe_config->port_clock in the platform's dp_link_dpll table and copy
 * them into the pipe config, marking clock_set so the generic clock
 * computation is skipped.
 * NOTE(review): excerpt is truncated — the gen4 platform check, some
 * divisor table assignments, and the loop's break/closing braces are
 * missing from this view.
 */
1221 intel_dp_set_clock(struct intel_encoder *encoder,
1222 struct intel_crtc_state *pipe_config)
1224 struct drm_device *dev = encoder->base.dev;
1225 const struct dp_link_dpll *divisor = NULL;
1229 divisor = gen4_dpll;
1230 count = ARRAY_SIZE(gen4_dpll);
1231 } else if (HAS_PCH_SPLIT(dev)) {
1233 count = ARRAY_SIZE(pch_dpll);
1234 } else if (IS_CHERRYVIEW(dev)) {
1236 count = ARRAY_SIZE(chv_dpll);
1237 } else if (IS_VALLEYVIEW(dev)) {
1239 count = ARRAY_SIZE(vlv_dpll);
1242 if (divisor && count) {
1243 for (i = 0; i < count; i++) {
1244 if (pipe_config->port_clock == divisor[i].clock) {
1245 pipe_config->dpll = divisor[i].dpll;
1246 pipe_config->clock_set = true;
/*
 * Merge-style intersection of two ascending link-rate arrays into
 * common_rates (bounded by DP_MAX_SUPPORTED_RATES); returns the number of
 * common entries. Both inputs must be sorted ascending for the two-pointer
 * walk to be correct.
 * NOTE(review): excerpt is truncated — the index advances, the final else
 * branch, and the return of the count are missing from this view.
 */
1253 static int intersect_rates(const int *source_rates, int source_len,
1254 const int *sink_rates, int sink_len,
1257 int i = 0, j = 0, k = 0;
1259 while (i < source_len && j < sink_len) {
1260 if (source_rates[i] == sink_rates[j]) {
1261 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1263 common_rates[k] = source_rates[i];
1267 } else if (source_rates[i] < sink_rates[j]) {
/*
 * Compute the set of link rates supported by BOTH source and sink into
 * common_rates; returns the count. Thin composition of the three helpers
 * above.
 */
1276 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1279 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1280 const int *source_rates, *sink_rates;
1281 int source_len, sink_len;
1283 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1284 source_len = intel_dp_source_rates(dev, &source_rates);
1286 return intersect_rates(source_rates, source_len,
1287 sink_rates, sink_len,
/*
 * Format an int array into str as a comma-separated list for debug output.
 * NOTE(review): excerpt is truncated — the str/len advance after each
 * snprintf and the truncation check are missing from this view.
 */
1291 static void snprintf_int_array(char *str, size_t len,
1292 const int *array, int nelem)
1298 for (i = 0; i < nelem; i++) {
1299 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/*
 * Debug-log the source, sink, and common link-rate tables. Early-outs
 * when KMS debugging is disabled so the string formatting is not done
 * on the fast path.
 */
1307 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1309 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1310 const int *source_rates, *sink_rates;
1311 int source_len, sink_len, common_len;
1312 int common_rates[DP_MAX_SUPPORTED_RATES];
1313 char str[128]; /* FIXME: too big for stack? */
1315 if ((drm_debug & DRM_UT_KMS) == 0)
1318 source_len = intel_dp_source_rates(dev, &source_rates);
1319 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1320 DRM_DEBUG_KMS("source rates: %s\n", str);
1322 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1323 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1324 DRM_DEBUG_KMS("sink rates: %s\n", str);
1326 common_len = intel_dp_common_rates(intel_dp, common_rates);
1327 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1328 DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Return the index of 'find' in the rates array. With find == 0 and a
 * zero-initialized array this yields the number of populated entries
 * (callers use rate_to_index(0, rates) as a length probe).
 * NOTE(review): excerpt is truncated — the break/return statements are
 * missing from this view.
 */
1331 static int rate_to_index(int find, const int *rates)
1335 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1336 if (find == rates[i])
/*
 * Return the highest link rate common to source and sink. The common
 * rates are ascending, so the last populated entry (found via the
 * rate_to_index(0, ...) length probe) is the maximum.
 */
1343 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1345 int rates[DP_MAX_SUPPORTED_RATES] = {};
1348 len = intel_dp_common_rates(intel_dp, rates);
1349 if (WARN_ON(len <= 0))
1352 return rates[rate_to_index(0, rates) - 1];
/*
 * Map a link rate to its index in the sink's rate table — the value
 * written to the sink's DP_LINK_RATE_SET register during training.
 */
1355 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1357 return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Translate port_clock into the training parameters the sink expects:
 * sinks with an explicit rate table get a rate-select index, legacy sinks
 * get a link-bw code.
 * NOTE(review): excerpt is truncated — the assignments of *link_bw and
 * *rate_select in the table-based branch are partially missing.
 */
1360 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1361 uint8_t *link_bw, uint8_t *rate_select)
1363 if (intel_dp->num_sink_rates) {
1366 intel_dp_rate_select(intel_dp, port_clock);
1368 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * Main DP mode-set computation: pick bpp, lane count, and link rate for
 * the requested mode, fill in pipe_config (m/n values, port clock, DRRS,
 * color range), and select the PLL on platforms that need it here.
 *
 * Strategy: walk bpp values downward (in 2-bits-per-channel steps) and,
 * for each, search (clock, lane_count) combinations until the link
 * bandwidth covers the mode's data rate. eDP panels are pinned to their
 * maximum clock/lane configuration since panels typically support exactly
 * one configuration.
 * NOTE(review): excerpt is truncated — variable declarations, several
 * branch bodies (e.g. the found-configuration goto target), and closing
 * braces are missing from this view.
 */
1374 intel_dp_compute_config(struct intel_encoder *encoder,
1375 struct intel_crtc_state *pipe_config)
1377 struct drm_device *dev = encoder->base.dev;
1378 struct drm_i915_private *dev_priv = dev->dev_private;
1379 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1380 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1381 enum port port = dp_to_dig_port(intel_dp)->port;
1382 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1383 struct intel_connector *intel_connector = intel_dp->attached_connector;
1384 int lane_count, clock;
1385 int min_lane_count = 1;
1386 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1387 /* Conveniently, the link BW constants become indices with a shift...*/
1391 int link_avail, link_clock;
1392 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1394 uint8_t link_bw, rate_select;
1396 common_len = intel_dp_common_rates(intel_dp, common_rates);
1398 /* No common link rates between source and sink */
1399 WARN_ON(common_len <= 0);
1401 max_clock = common_len - 1;
/* Pre-DDI PCH platforms drive DP through a PCH encoder (except eDP on A). */
1403 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1404 pipe_config->has_pch_encoder = true;
1406 pipe_config->has_dp_encoder = true;
1407 pipe_config->has_drrs = false;
1408 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1410 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1411 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1414 if (INTEL_INFO(dev)->gen >= 9) {
1416 ret = skl_update_scaler_crtc(pipe_config);
1421 if (!HAS_PCH_SPLIT(dev))
1422 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1423 intel_connector->panel.fitting_mode);
1425 intel_pch_panel_fitting(intel_crtc, pipe_config,
1426 intel_connector->panel.fitting_mode);
/* Double-clocked modes are not supported on DP. */
1429 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1432 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1433 "max bw %d pixel clock %iKHz\n",
1434 max_lane_count, common_rates[max_clock],
1435 adjusted_mode->crtc_clock);
1437 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1438 * bpc in between. */
1439 bpp = pipe_config->pipe_bpp;
1440 if (is_edp(intel_dp)) {
1442 /* Get bpp from vbt only for panels that dont have bpp in edid */
1443 if (intel_connector->base.display_info.bpc == 0 &&
1444 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1445 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1446 dev_priv->vbt.edp_bpp);
1447 bpp = dev_priv->vbt.edp_bpp;
1451 * Use the maximum clock and number of lanes the eDP panel
1452 * advertizes being capable of. The panels are generally
1453 * designed to support only a single clock and lane
1454 * configuration, and typically these values correspond to the
1455 * native resolution of the panel.
1457 min_lane_count = max_lane_count;
1458 min_clock = max_clock;
1461 for (; bpp >= 6*3; bpp -= 2*3) {
1462 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1465 for (clock = min_clock; clock <= max_clock; clock++) {
1466 for (lane_count = min_lane_count;
1467 lane_count <= max_lane_count;
1470 link_clock = common_rates[clock];
1471 link_avail = intel_dp_max_data_rate(link_clock,
/* First (clock, lanes) combination with enough bandwidth wins. */
1474 if (mode_rate <= link_avail) {
1484 if (intel_dp->color_range_auto) {
1487 * CEA-861-E - 5.1 Default Encoding Parameters
1488 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1490 pipe_config->limited_color_range =
1491 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1493 pipe_config->limited_color_range =
1494 intel_dp->limited_color_range;
1497 pipe_config->lane_count = lane_count;
1499 pipe_config->pipe_bpp = bpp;
1500 pipe_config->port_clock = common_rates[clock];
1502 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1503 &link_bw, &rate_select);
1505 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1506 link_bw, rate_select, pipe_config->lane_count,
1507 pipe_config->port_clock, bpp);
1508 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1509 mode_rate, link_avail);
1511 intel_link_compute_m_n(bpp, lane_count,
1512 adjusted_mode->crtc_clock,
1513 pipe_config->port_clock,
1514 &pipe_config->dp_m_n);
/* Second m/n set for the seamless-DRRS downclocked mode, if available. */
1516 if (intel_connector->panel.downclock_mode != NULL &&
1517 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1518 pipe_config->has_drrs = true;
1519 intel_link_compute_m_n(bpp, lane_count,
1520 intel_connector->panel.downclock_mode->clock,
1521 pipe_config->port_clock,
1522 &pipe_config->dp_m2_n2);
1525 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1526 skl_edp_set_pll_config(pipe_config);
1527 else if (IS_BROXTON(dev))
1528 /* handled in ddi */;
1529 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1530 hsw_dp_set_ddi_pll_sel(pipe_config);
1532 intel_dp_set_clock(encoder, pipe_config);
/*
 * Program the CPU eDP PLL frequency select bits in DP_A to match the
 * configured port clock (162 MHz link -> "160MHz" field, else 270 MHz),
 * mirroring the choice into the cached intel_dp->DP value.
 */
1537 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1539 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1540 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1541 struct drm_device *dev = crtc->base.dev;
1542 struct drm_i915_private *dev_priv = dev->dev_private;
1545 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1546 crtc->config->port_clock);
1547 dpa_ctl = I915_READ(DP_A);
1548 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1550 if (crtc->config->port_clock == 162000) {
1551 /* For a long time we've carried around a ILK-DevA w/a for the
1552 * 160MHz clock. If we're really unlucky, it's still required.
1554 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1555 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1556 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1558 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1559 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1562 I915_WRITE(DP_A, dpa_ctl);
/* Cache the negotiated link rate and lane count for later link training. */
1568 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1569 const struct intel_crtc_state *pipe_config)
1571 intel_dp->link_rate = pipe_config->port_clock;
1572 intel_dp->lane_count = pipe_config->lane_count;
/*
 * Build the DP port control register value (intel_dp->DP) for the coming
 * enable, handling the three register layouts: gen7 CPU eDP (port A),
 * CPT PCH ports (sync/enh-framing live in TRANS_DP_CTL), and the
 * IBX/CPU/VLV/CHV layout where everything sits in the port register.
 * NOTE(review): excerpt is truncated — some closing braces and comment
 * lines are missing from this view.
 */
1575 static void intel_dp_prepare(struct intel_encoder *encoder)
1577 struct drm_device *dev = encoder->base.dev;
1578 struct drm_i915_private *dev_priv = dev->dev_private;
1579 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1580 enum port port = dp_to_dig_port(intel_dp)->port;
1581 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1582 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1584 intel_dp_set_link_params(intel_dp, crtc->config);
1587 * There are four kinds of DP registers:
1594 * IBX PCH and CPU are the same for almost everything,
1595 * except that the CPU DP PLL is configured in this
1598 * CPT PCH is quite different, having many bits moved
1599 * to the TRANS_DP_CTL register instead. That
1600 * configuration happens (oddly) in ironlake_pch_enable
1603 /* Preserve the BIOS-computed detected bit. This is
1604 * supposed to be read-only.
1606 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1608 /* Handle DP bits in common between all three register formats */
1609 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1610 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1612 if (crtc->config->has_audio)
1613 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1615 /* Split out the IBX/CPU vs CPT settings */
1617 if (IS_GEN7(dev) && port == PORT_A) {
1618 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1619 intel_dp->DP |= DP_SYNC_HS_HIGH;
1620 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1621 intel_dp->DP |= DP_SYNC_VS_HIGH;
1622 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1624 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1625 intel_dp->DP |= DP_ENHANCED_FRAMING;
1627 intel_dp->DP |= crtc->pipe << 29;
1628 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1631 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
/* On CPT enhanced framing is a transcoder attribute, not a port one. */
1633 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1634 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1635 trans_dp |= TRANS_DP_ENH_FRAMING;
1637 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1638 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1640 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1641 crtc->config->limited_color_range)
1642 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1644 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1645 intel_dp->DP |= DP_SYNC_HS_HIGH;
1646 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1647 intel_dp->DP |= DP_SYNC_VS_HIGH;
1648 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1650 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1651 intel_dp->DP |= DP_ENHANCED_FRAMING;
1653 if (IS_CHERRYVIEW(dev))
1654 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1655 else if (crtc->pipe == PIPE_B)
1656 intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs polled against the panel power-sequencer status
 * register by wait_panel_status(): "on and idle", "off", and "power
 * cycle complete". The literal 0 fields keep the mask/value columns
 * aligned for easy visual comparison.
 */
1660 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1661 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1663 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1664 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1666 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1667 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll the panel power status register until (status & mask) == value,
 * with a 5 s timeout (10 us poll interval). Logs an error on timeout but
 * does not propagate it. Caller must hold pps_mutex.
 */
1669 static void wait_panel_status(struct intel_dp *intel_dp,
1673 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1674 struct drm_i915_private *dev_priv = dev->dev_private;
1675 u32 pp_stat_reg, pp_ctrl_reg;
1677 lockdep_assert_held(&dev_priv->pps_mutex);
1679 pp_stat_reg = _pp_stat_reg(intel_dp);
1680 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1682 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1684 I915_READ(pp_stat_reg),
1685 I915_READ(pp_ctrl_reg));
1687 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1688 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1689 I915_READ(pp_stat_reg),
1690 I915_READ(pp_ctrl_reg));
1693 DRM_DEBUG_KMS("Wait complete\n");
/* Block until the power sequencer reports the panel fully on and idle. */
1696 static void wait_panel_on(struct intel_dp *intel_dp)
1698 DRM_DEBUG_KMS("Wait for panel power on\n");
1699 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the power sequencer reports the panel fully off. */
1702 static void wait_panel_off(struct intel_dp *intel_dp)
1704 DRM_DEBUG_KMS("Wait for panel power off time\n")
1705 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Honour the panel's minimum power-cycle delay: wait out any remainder of
 * panel_power_cycle_delay since the last power-off (needed when the VDD
 * override was dropped last, bypassing the hardware sequencer's own delay),
 * then wait for the sequencer to report the cycle complete.
 */
1708 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1710 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1712 /* When we disable the VDD override bit last we have to do the manual
1714 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1715 intel_dp->panel_power_cycle_delay);
1717 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Wait out the panel's power-on -> backlight-on delay if not yet elapsed. */
1720 static void wait_backlight_on(struct intel_dp *intel_dp)
1722 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1723 intel_dp->backlight_on_delay);
/* Wait out the backlight-off -> panel-power-off delay if not yet elapsed. */
1726 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1728 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1729 intel_dp->backlight_off_delay);
1732 /* Read the current pp_control value, unlocking the register if it
/*
 * Returns the PP_CONTROL value with the write-protect key replaced by the
 * unlock magic on platforms that have one (not BXT), so the caller can
 * modify and write it back. Caller must hold pps_mutex.
 * NOTE(review): the tail of this comment and the final return statement
 * are missing from this excerpt.
 */
1736 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1738 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1739 struct drm_i915_private *dev_priv = dev->dev_private;
1742 lockdep_assert_held(&dev_priv->pps_mutex);
1744 control = I915_READ(_pp_ctrl_reg(intel_dp));
1745 if (!IS_BROXTON(dev)) {
1746 control &= ~PANEL_UNLOCK_MASK;
1747 control |= PANEL_UNLOCK_REGS;
/*
 * Force panel VDD on so AUX transactions work before full panel power-up.
 * Returns true if the caller is responsible for the matching vdd-off
 * (i.e. VDD was not already requested). Takes a display power reference
 * when it actually enables VDD, and sleeps for panel_power_up_delay when
 * the panel had no power so the AUX channel is usable on return.
 */
1753 * Must be paired with edp_panel_vdd_off().
1754 * Must hold pps_mutex around the whole on/off sequence.
1755 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1757 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1759 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1760 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1761 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1762 struct drm_i915_private *dev_priv = dev->dev_private;
1763 enum intel_display_power_domain power_domain;
1765 u32 pp_stat_reg, pp_ctrl_reg;
1766 bool need_to_disable = !intel_dp->want_panel_vdd;
1768 lockdep_assert_held(&dev_priv->pps_mutex);
1770 if (!is_edp(intel_dp))
/* Cancel any pending deferred vdd-off; we want VDD held now. */
1773 cancel_delayed_work(&intel_dp->panel_vdd_work);
1774 intel_dp->want_panel_vdd = true;
1776 if (edp_have_panel_vdd(intel_dp))
1777 return need_to_disable;
1779 power_domain = intel_display_port_power_domain(intel_encoder);
1780 intel_display_power_get(dev_priv, power_domain);
1782 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1783 port_name(intel_dig_port->port));
1785 if (!edp_have_panel_power(intel_dp))
1786 wait_panel_power_cycle(intel_dp);
1788 pp = ironlake_get_pp_control(intel_dp);
1789 pp |= EDP_FORCE_VDD;
1791 pp_stat_reg = _pp_stat_reg(intel_dp);
1792 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1794 I915_WRITE(pp_ctrl_reg, pp);
1795 POSTING_READ(pp_ctrl_reg);
1796 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1797 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1799 * If the panel wasn't on, delay before accessing aux channel
1801 if (!edp_have_panel_power(intel_dp)) {
1802 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1803 port_name(intel_dig_port->port));
1804 msleep(intel_dp->panel_power_up_delay);
1807 return need_to_disable;
/*
 * Public, self-locking wrapper around edp_panel_vdd_on(). Warns if VDD
 * was already requested, since that indicates an unbalanced caller.
 * NOTE(review): the pps_lock() call preceding edp_panel_vdd_on() is
 * missing from this excerpt.
 */
1811 * Must be paired with intel_edp_panel_vdd_off() or
1812 * intel_edp_panel_off().
1813 * Nested calls to these functions are not allowed since
1814 * we drop the lock. Caller must use some higher level
1815 * locking to prevent nested calls from other threads.
1817 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1821 if (!is_edp(intel_dp))
1825 vdd = edp_panel_vdd_on(intel_dp);
1826 pps_unlock(intel_dp);
1828 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1829 port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Actually drop the VDD override: clear EDP_FORCE_VDD, record the start
 * of a power cycle if panel power is now fully off, and release the
 * display power reference taken by edp_panel_vdd_on(). Caller must hold
 * pps_mutex, and nobody must still want VDD (WARNed).
 */
1832 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1834 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1835 struct drm_i915_private *dev_priv = dev->dev_private;
1836 struct intel_digital_port *intel_dig_port =
1837 dp_to_dig_port(intel_dp);
1838 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1839 enum intel_display_power_domain power_domain;
1841 u32 pp_stat_reg, pp_ctrl_reg;
1843 lockdep_assert_held(&dev_priv->pps_mutex);
1845 WARN_ON(intel_dp->want_panel_vdd);
1847 if (!edp_have_panel_vdd(intel_dp))
1850 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1851 port_name(intel_dig_port->port));
1853 pp = ironlake_get_pp_control(intel_dp);
1854 pp &= ~EDP_FORCE_VDD;
1856 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1857 pp_stat_reg = _pp_stat_reg(intel_dp);
1859 I915_WRITE(pp_ctrl_reg, pp);
1860 POSTING_READ(pp_ctrl_reg);
1862 /* Make sure sequencer is idle before allowing subsequent activity */
1863 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1864 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
/* VDD was the only thing keeping the panel powered: a cycle begins now. */
1866 if ((pp & POWER_TARGET_ON) == 0)
1867 intel_dp->last_power_cycle = jiffies;
1869 power_domain = intel_display_port_power_domain(intel_encoder);
1870 intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work handler that turns VDD off, unless someone re-requested
 * it in the meantime. NOTE(review): the pps_lock() call before the check
 * is missing from this excerpt (the pps_unlock() pairing implies it).
 */
1873 static void edp_panel_vdd_work(struct work_struct *__work)
1875 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1876 struct intel_dp, panel_vdd_work);
1879 if (!intel_dp->want_panel_vdd)
1880 edp_panel_vdd_off_sync(intel_dp);
1881 pps_unlock(intel_dp);
/*
 * Schedule the deferred VDD-off far in the future (5x the power-cycle
 * delay) so a burst of AUX accesses doesn't thrash panel power.
 */
1884 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1886 unsigned long delay;
1889 * Queue the timer to fire a long time from now (relative to the power
1890 * down delay) to keep the panel power up across a sequence of
1893 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1894 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Drop the VDD request: synchronously when 'sync' is set, otherwise via
 * the deferred vdd-off work. Warns if VDD was not actually forced on.
 */
1898 * Must be paired with edp_panel_vdd_on().
1899 * Must hold pps_mutex around the whole on/off sequence.
1900 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1902 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1904 struct drm_i915_private *dev_priv =
1905 intel_dp_to_dev(intel_dp)->dev_private;
1907 lockdep_assert_held(&dev_priv->pps_mutex);
1909 if (!is_edp(intel_dp))
1912 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1913 port_name(dp_to_dig_port(intel_dp)->port));
1915 intel_dp->want_panel_vdd = false;
1918 edp_panel_vdd_off_sync(intel_dp);
1920 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Turn on panel power through the power sequencer, observing the minimum
 * power-cycle delay first and applying the ILK workaround of dropping
 * PANEL_POWER_RESET around the sequence. Caller must hold pps_mutex;
 * warns (and bails) if panel power is already on.
 */
1923 static void edp_panel_on(struct intel_dp *intel_dp)
1925 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1926 struct drm_i915_private *dev_priv = dev->dev_private;
1930 lockdep_assert_held(&dev_priv->pps_mutex);
1932 if (!is_edp(intel_dp))
1935 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1936 port_name(dp_to_dig_port(intel_dp)->port));
1938 if (WARN(edp_have_panel_power(intel_dp),
1939 "eDP port %c panel power already on\n",
1940 port_name(dp_to_dig_port(intel_dp)->port)))
1943 wait_panel_power_cycle(intel_dp);
1945 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1946 pp = ironlake_get_pp_control(intel_dp);
1948 /* ILK workaround: disable reset around power sequence */
1949 pp &= ~PANEL_POWER_RESET;
1950 I915_WRITE(pp_ctrl_reg, pp);
1951 POSTING_READ(pp_ctrl_reg);
1954 pp |= POWER_TARGET_ON;
1956 pp |= PANEL_POWER_RESET;
1958 I915_WRITE(pp_ctrl_reg, pp);
1959 POSTING_READ(pp_ctrl_reg);
1961 wait_panel_on(intel_dp);
1962 intel_dp->last_power_on = jiffies;
1965 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1966 I915_WRITE(pp_ctrl_reg, pp);
1967 POSTING_READ(pp_ctrl_reg);
/*
 * Public, self-locking wrapper around edp_panel_on(); no-op for non-eDP.
 * NOTE(review): the pps_lock() call before edp_panel_on() is missing
 * from this excerpt (the pps_unlock() pairing implies it).
 */
1971 void intel_edp_panel_on(struct intel_dp *intel_dp)
1973 if (!is_edp(intel_dp))
1977 edp_panel_on(intel_dp);
1978 pps_unlock(intel_dp);
/*
 * Turn off panel power: clears panel-on, reset, VDD-force and backlight
 * bits in one write (some panels misbehave if VDD is left forced during
 * power-down), records the power-cycle start, waits for the sequencer to
 * finish, then drops the display power reference that the earlier VDD-on
 * acquired. Caller must hold pps_mutex and must still hold the VDD
 * request (WARNed otherwise).
 */
1982 static void edp_panel_off(struct intel_dp *intel_dp)
1984 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1985 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1986 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1987 struct drm_i915_private *dev_priv = dev->dev_private;
1988 enum intel_display_power_domain power_domain;
1992 lockdep_assert_held(&dev_priv->pps_mutex);
1994 if (!is_edp(intel_dp))
1997 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1998 port_name(dp_to_dig_port(intel_dp)->port));
2000 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2001 port_name(dp_to_dig_port(intel_dp)->port));
2003 pp = ironlake_get_pp_control(intel_dp);
2004 /* We need to switch off panel power _and_ force vdd, for otherwise some
2005 * panels get very unhappy and cease to work. */
2006 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2009 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2011 intel_dp->want_panel_vdd = false;
2013 I915_WRITE(pp_ctrl_reg, pp);
2014 POSTING_READ(pp_ctrl_reg);
2016 intel_dp->last_power_cycle = jiffies;
2017 wait_panel_off(intel_dp);
2019 /* We got a reference when we enabled the VDD. */
2020 power_domain = intel_display_port_power_domain(intel_encoder);
2021 intel_display_power_put(dev_priv, power_domain);
/*
 * Public, self-locking wrapper around edp_panel_off(); no-op for non-eDP.
 * NOTE(review): the pps_lock() call before edp_panel_off() is missing
 * from this excerpt (the pps_unlock() pairing implies it).
 */
2024 void intel_edp_panel_off(struct intel_dp *intel_dp)
2026 if (!is_edp(intel_dp))
2030 edp_panel_off(intel_dp);
2031 pps_unlock(intel_dp);
2034 /* Enable backlight in the panel power control. */
/*
 * Sets EDP_BLC_ENABLE in PP_CONTROL after honouring the power-on ->
 * backlight-on delay (avoids flicker while the panel syncs to the link).
 * NOTE(review): the pps_lock() call before the register update is
 * missing from this excerpt (the pps_unlock() pairing implies it).
 */
2035 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2037 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2038 struct drm_device *dev = intel_dig_port->base.base.dev;
2039 struct drm_i915_private *dev_priv = dev->dev_private;
2044 * If we enable the backlight right away following a panel power
2045 * on, we may see slight flicker as the panel syncs with the eDP
2046 * link. So delay a bit to make sure the image is solid before
2047 * allowing it to appear.
2049 wait_backlight_on(intel_dp);
2053 pp = ironlake_get_pp_control(intel_dp);
2054 pp |= EDP_BLC_ENABLE;
2056 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2058 I915_WRITE(pp_ctrl_reg, pp);
2059 POSTING_READ(pp_ctrl_reg);
2061 pps_unlock(intel_dp);
2064 /* Enable backlight PWM and backlight PP control. */
/* Public entry: enable the PWM backlight first, then the PP control bit. */
2065 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2067 if (!is_edp(intel_dp))
2070 DRM_DEBUG_KMS("\n");
2072 intel_panel_enable_backlight(intel_dp->attached_connector);
2073 _intel_edp_backlight_on(intel_dp);
2076 /* Disable backlight in the panel power control. */
/*
 * Clears EDP_BLC_ENABLE in PP_CONTROL, then records the time and waits
 * out the backlight-off delay so a subsequent panel power-off honours the
 * panel's timing requirements. NOTE(review): the pps_lock() call before
 * the register update is missing from this excerpt.
 */
2077 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2079 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2080 struct drm_i915_private *dev_priv = dev->dev_private;
2084 if (!is_edp(intel_dp))
2089 pp = ironlake_get_pp_control(intel_dp);
2090 pp &= ~EDP_BLC_ENABLE;
2092 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2094 I915_WRITE(pp_ctrl_reg, pp);
2095 POSTING_READ(pp_ctrl_reg);
2097 pps_unlock(intel_dp);
2099 intel_dp->last_backlight_off = jiffies;
2100 edp_wait_backlight_off(intel_dp);
2103 /* Disable backlight PP control and backlight PWM. */
/* Public entry: disable the PP control bit first, then the PWM backlight. */
2104 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2106 if (!is_edp(intel_dp))
2109 DRM_DEBUG_KMS("\n");
2111 _intel_edp_backlight_off(intel_dp);
2112 intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * bl_power sysfs hook: reads the current PP backlight-enable state and
 * only toggles when it differs from the requested state, making repeated
 * calls idempotent. NOTE(review): the pps_lock() call before reading
 * PP_CONTROL is missing from this excerpt.
 */
2116 * Hook for controlling the panel power control backlight through the bl_power
2117 * sysfs attribute. Take care to handle multiple calls.
2119 static void intel_edp_backlight_power(struct intel_connector *connector,
2122 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2126 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2127 pps_unlock(intel_dp);
2129 if (is_enabled == enable)
2132 DRM_DEBUG_KMS("panel power control backlight %s\n",
2133 enable ? "enable" : "disable");
2136 _intel_edp_backlight_on(intel_dp);
2138 _intel_edp_backlight_off(intel_dp);
/*
 * Enable the CPU eDP PLL via DP_A. Requires the pipe to be disabled and
 * the PLL/port currently off (WARNed). Clears port/audio enable bits in
 * the cached DP value so only the PLL gets enabled by this write.
 * NOTE(review): the POSTING_READ/udelay after the write are missing from
 * this excerpt.
 */
2141 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2143 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2144 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2145 struct drm_device *dev = crtc->dev;
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2149 assert_pipe_disabled(dev_priv,
2150 to_intel_crtc(crtc)->pipe);
2152 DRM_DEBUG_KMS("\n");
2153 dpa_ctl = I915_READ(DP_A);
2154 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2155 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2157 /* We don't adjust intel_dp->DP while tearing down the link, to
2158 * facilitate link retraining (e.g. after hotplug). Hence clear all
2159 * enable bits here to ensure that we don't enable too much. */
2160 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2161 intel_dp->DP |= DP_PLL_ENABLE;
2162 I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL via DP_A. Requires the pipe disabled, the PLL
 * currently on, and the port off (WARNed). Works on a fresh register
 * read rather than intel_dp->DP, which must stay untouched for link
 * retraining. NOTE(review): the POSTING_READ/udelay after the write are
 * missing from this excerpt.
 */
2167 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2169 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2170 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2171 struct drm_device *dev = crtc->dev;
2172 struct drm_i915_private *dev_priv = dev->dev_private;
2175 assert_pipe_disabled(dev_priv,
2176 to_intel_crtc(crtc)->pipe);
2178 dpa_ctl = I915_READ(DP_A);
2179 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2180 "dp pll off, should be on\n");
2181 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2183 /* We can't rely on the value tracked for the DP register in
2184 * intel_dp->DP because link_down must not change that (otherwise link
2185 * re-training will fail. */
2186 dpa_ctl &= ~DP_PLL_ENABLE;
2187 I915_WRITE(DP_A, dpa_ctl);
2192 /* If the sink supports it, try to set the power state appropriately */
/*
 * Write DP_SET_POWER on the sink: D3 for any non-ON mode, D0 for ON —
 * retried up to 3 times because sinks may need up to 1 ms to wake.
 * Sinks with DPCD < 1.1 don't support the register and are skipped.
 * NOTE(review): the written D0/D3 values and retry sleep are on lines
 * missing from this excerpt.
 */
2193 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2197 /* Should have a valid DPCD by this point */
2198 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2201 if (mode != DRM_MODE_DPMS_ON) {
2202 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2206 * When turning on, we need to retry for 1ms to give the sink
2209 for (i = 0; i < 3; i++) {
2210 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2219 DRM_DEBUG_KMS("failed to %s sink power state\n",
2220 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Hardware state readout: return whether the DP port is enabled and, if
 * so, which pipe drives it. Pipe decoding differs per platform: gen7
 * port A and CPT use transcoder-based encodings, CHV has its own field,
 * everything else uses the classic pipe-select bit.
 * NOTE(review): several return statements and closing braces are on
 * lines missing from this excerpt.
 */
2223 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2226 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2227 enum port port = dp_to_dig_port(intel_dp)->port;
2228 struct drm_device *dev = encoder->base.dev;
2229 struct drm_i915_private *dev_priv = dev->dev_private;
2230 enum intel_display_power_domain power_domain;
2233 power_domain = intel_display_port_power_domain(encoder);
2234 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2237 tmp = I915_READ(intel_dp->output_reg);
2239 if (!(tmp & DP_PORT_EN))
2242 if (IS_GEN7(dev) && port == PORT_A) {
2243 *pipe = PORT_TO_PIPE_CPT(tmp);
2244 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
/* On CPT the pipe association lives in TRANS_DP_CTL; scan all pipes. */
2247 for_each_pipe(dev_priv, p) {
2248 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2249 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2255 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2256 intel_dp->output_reg);
2257 } else if (IS_CHERRYVIEW(dev)) {
2258 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2260 *pipe = PORT_TO_PIPE(tmp);
/*
 * Hardware state readout: reconstruct the pipe config (sync flags, audio,
 * color range, lane count, m/n, port clock, dot clock) from the DP port
 * and transcoder registers. Also clamps the VBT's eDP bpp upward when the
 * BIOS demonstrably drives the panel at a higher bpp than the VBT claims.
 */
2266 static void intel_dp_get_config(struct intel_encoder *encoder,
2267 struct intel_crtc_state *pipe_config)
2269 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2271 struct drm_device *dev = encoder->base.dev;
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2273 enum port port = dp_to_dig_port(intel_dp)->port;
2274 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2277 tmp = I915_READ(intel_dp->output_reg);
2279 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg elsewhere. */
2281 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2282 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2284 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2285 flags |= DRM_MODE_FLAG_PHSYNC;
2287 flags |= DRM_MODE_FLAG_NHSYNC;
2289 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2290 flags |= DRM_MODE_FLAG_PVSYNC;
2292 flags |= DRM_MODE_FLAG_NVSYNC;
2294 if (tmp & DP_SYNC_HS_HIGH)
2295 flags |= DRM_MODE_FLAG_PHSYNC;
2297 flags |= DRM_MODE_FLAG_NHSYNC;
2299 if (tmp & DP_SYNC_VS_HIGH)
2300 flags |= DRM_MODE_FLAG_PVSYNC;
2302 flags |= DRM_MODE_FLAG_NVSYNC;
2305 pipe_config->base.adjusted_mode.flags |= flags;
2307 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2308 tmp & DP_COLOR_RANGE_16_235)
2309 pipe_config->limited_color_range = true;
2311 pipe_config->has_dp_encoder = true;
2313 pipe_config->lane_count =
2314 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2316 intel_dp_get_m_n(crtc, pipe_config);
/* Port A's clock comes from the CPU eDP PLL frequency select bits. */
2318 if (port == PORT_A) {
2319 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2320 pipe_config->port_clock = 162000;
2322 pipe_config->port_clock = 270000;
2325 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2326 &pipe_config->dp_m_n);
2328 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2329 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2331 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2333 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2334 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2336 * This is a big fat ugly hack.
2338 * Some machines in UEFI boot mode provide us a VBT that has 18
2339 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2340 * unknown we fail to light up. Yet the same BIOS boots up with
2341 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2342 * max, not what it tells us to use.
2344 * Note: This will still be broken if the eDP panel is not lit
2345 * up by the BIOS, and thus we can't get the mode at module
2348 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2349 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2350 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Encoder disable: stop audio and PSR, then power the panel down in the
 * required order (VDD on -> backlight off -> sink to D3 -> panel off).
 * On gen < 5 the port must be disabled before the pipe, so link-down
 * happens here rather than in post_disable.
 */
2354 static void intel_disable_dp(struct intel_encoder *encoder)
2356 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2357 struct drm_device *dev = encoder->base.dev;
2358 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2360 if (crtc->config->has_audio)
2361 intel_audio_codec_disable(encoder);
2363 if (HAS_PSR(dev) && !HAS_DDI(dev))
2364 intel_psr_disable(intel_dp);
2366 /* Make sure the panel is off before trying to change the mode. But also
2367 * ensure that we have vdd while we switch off the panel. */
2368 intel_edp_panel_vdd_on(intel_dp);
2369 intel_edp_backlight_off(intel_dp);
2370 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2371 intel_edp_panel_off(intel_dp);
2373 /* disable the port before the pipe on g4x */
2374 if (INTEL_INFO(dev)->gen < 5)
2375 intel_dp_link_down(intel_dp);
/*
 * ILK post-disable: take the link down, then turn off the CPU eDP PLL.
 * NOTE(review): the port == PORT_A guard before ironlake_edp_pll_off()
 * is on a line missing from this excerpt — confirm against the full file.
 */
2378 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2380 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2381 enum port port = dp_to_dig_port(intel_dp)->port;
2383 intel_dp_link_down(intel_dp);
2385 ironlake_edp_pll_off(intel_dp);
/* VLV post-disable: just take the DP link down. */
2388 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2390 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2392 intel_dp_link_down(intel_dp);
/*
 * Assert or deassert the CHV PHY data-lane and clock soft resets via
 * sideband DPIO writes. Lanes 0/1 (PCS01) are always handled; lanes 2/3
 * (PCS23) only when the link uses more than 2 lanes. The direction
 * (assert vs deassert) is presumably controlled by a 'reset' bool
 * parameter whose declaration line is missing from this excerpt — the
 * set/clear pairs on adjacent lines imply the two branches.
 */
2395 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2398 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2399 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2400 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2401 enum pipe pipe = crtc->pipe;
2404 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2406 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2408 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2409 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2411 if (crtc->config->lane_count > 2) {
2412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2414 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2416 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2417 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2421 val |= CHV_PCS_REQ_SOFTRESET_EN;
2423 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2425 val |= DPIO_PCS_CLK_SOFT_RESET;
2426 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2428 if (crtc->config->lane_count > 2) {
2429 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2430 val |= CHV_PCS_REQ_SOFTRESET_EN;
2432 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2434 val |= DPIO_PCS_CLK_SOFT_RESET;
2435 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
/*
 * CHV post-disable hook: take the link down, then assert the data-lane
 * soft reset under sb_lock (chv_data_lane_soft_reset does raw DPIO I/O).
 */
2439 static void chv_post_disable_dp(struct intel_encoder *encoder)
2441 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2442 	struct drm_device *dev = encoder->base.dev;
2443 	struct drm_i915_private *dev_priv = dev->dev_private;
2445 	intel_dp_link_down(intel_dp);
2447 	mutex_lock(&dev_priv->sb_lock);
2449 	/* Assert data lane reset */
2450 	chv_data_lane_soft_reset(encoder, true);
2452 	mutex_unlock(&dev_priv->sb_lock);
/*
 * Program the requested DP training pattern (and scrambling disable bit)
 * into either DP_TP_CTL (DDI platforms) or the port's DP register image
 * (*DP) for CPT / gen7-port-A / gen4-style hardware.
 *
 * NOTE(review): this excerpt elides the `break;` statements, `else`
 * branches and some closing braces (embedded numbering jumps) — the
 * switch cases below are NOT fall-through in the full file; verify there.
 */
2456 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2458 			 uint8_t dp_train_pat)
2460 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2461 	struct drm_device *dev = intel_dig_port->base.base.dev;
2462 	struct drm_i915_private *dev_priv = dev->dev_private;
2463 	enum port port = intel_dig_port->port;
	/* DDI path: training pattern lives in DP_TP_CTL, not the DP reg. */
2466 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2468 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2469 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2471 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2473 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2474 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2475 		case DP_TRAINING_PATTERN_DISABLE:
2476 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2479 		case DP_TRAINING_PATTERN_1:
2480 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2482 		case DP_TRAINING_PATTERN_2:
2483 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2485 		case DP_TRAINING_PATTERN_3:
2486 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2489 		I915_WRITE(DP_TP_CTL(port), temp);
	/* CPT-style bits (also gen7 eDP on port A). */
2491 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2492 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2493 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2495 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2496 		case DP_TRAINING_PATTERN_DISABLE:
2497 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2499 		case DP_TRAINING_PATTERN_1:
2500 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2502 		case DP_TRAINING_PATTERN_2:
2503 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2505 		case DP_TRAINING_PATTERN_3:
			/* TPS3 unsupported here: fall back to pattern 2. */
2506 			DRM_ERROR("DP training pattern 3 not supported\n");
2507 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
	/* Legacy / VLV / CHV path: pattern bits directly in the DP reg. */
2512 		if (IS_CHERRYVIEW(dev))
2513 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2515 			*DP &= ~DP_LINK_TRAIN_MASK;
2517 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2518 		case DP_TRAINING_PATTERN_DISABLE:
2519 			*DP |= DP_LINK_TRAIN_OFF;
2521 		case DP_TRAINING_PATTERN_1:
2522 			*DP |= DP_LINK_TRAIN_PAT_1;
2524 		case DP_TRAINING_PATTERN_2:
2525 			*DP |= DP_LINK_TRAIN_PAT_2;
2527 		case DP_TRAINING_PATTERN_3:
2528 			if (IS_CHERRYVIEW(dev)) {
2529 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2531 				DRM_ERROR("DP training pattern 3 not supported\n");
2532 				*DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Enable the DP port: first write the register image with training
 * pattern 1 and the port still disabled, then a second write with
 * DP_PORT_EN set — the two-step dance is required on VLV/CHV (see the
 * "Magic" comment below) or link training fails on a fresh PPS.
 */
2539 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2541 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2542 	struct drm_i915_private *dev_priv = dev->dev_private;
2544 	/* enable with pattern 1 (as per spec) */
2545 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2546 				 DP_TRAINING_PATTERN_1);
2548 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2549 	POSTING_READ(intel_dp->output_reg);
2552 	 * Magic for VLV/CHV. We _must_ first set up the register
2553 	 * without actually enabling the port, and then do another
2554 	 * write to enable the port. Otherwise link training will
2555 	 * fail when the power sequencer is freshly used for this port.
2557 	intel_dp->DP |= DP_PORT_EN;
2559 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2560 	POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable sequence: bail if the port is already on, init the
 * VLV panel power sequencer, enable the port, run the eDP panel-on
 * sequence under vdd, wait for VLV/CHV phy readiness, wake the sink,
 * run link training, and finally enable audio if configured.
 * NOTE(review): lines are elided here (e.g. the pps_lock() matching the
 * pps_unlock() at 2585, and vlv_wait_port_ready()'s lane_mask argument)
 * — confirm the locking pairing in the full file.
 */
2563 static void intel_enable_dp(struct intel_encoder *encoder)
2565 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2566 	struct drm_device *dev = encoder->base.dev;
2567 	struct drm_i915_private *dev_priv = dev->dev_private;
2568 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2569 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	/* Enabling an already-enabled port would corrupt the sequence. */
2571 	if (WARN_ON(dp_reg & DP_PORT_EN))
2576 	if (IS_VALLEYVIEW(dev))
2577 		vlv_init_panel_power_sequencer(intel_dp);
2579 	intel_dp_enable_port(intel_dp);
	/* eDP panel-on must happen with vdd held. */
2581 	edp_panel_vdd_on(intel_dp);
2582 	edp_panel_on(intel_dp);
2583 	edp_panel_vdd_off(intel_dp, true);
2585 	pps_unlock(intel_dp);
2587 	if (IS_VALLEYVIEW(dev)) {
2588 		unsigned int lane_mask = 0x0;
2590 		if (IS_CHERRYVIEW(dev))
2591 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2593 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2597 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2598 	intel_dp_start_link_train(intel_dp);
2599 	intel_dp_complete_link_train(intel_dp);
2600 	intel_dp_stop_link_train(intel_dp);
2602 	if (crtc->config->has_audio) {
2603 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2604 				 pipe_name(crtc->pipe));
2605 		intel_audio_codec_enable(encoder);
/* g4x enable hook: full enable sequence, then backlight on (eDP). */
2609 static void g4x_enable_dp(struct intel_encoder *encoder)
2611 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2613 	intel_enable_dp(encoder);
2614 	intel_edp_backlight_on(intel_dp);
/*
 * VLV enable hook: only backlight + PSR here — the main enable sequence
 * runs earlier from the pre_enable hook on VLV (see vlv_pre_enable_dp).
 */
2617 static void vlv_enable_dp(struct intel_encoder *encoder)
2619 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2621 	intel_edp_backlight_on(intel_dp);
2622 	intel_psr_enable(intel_dp);
/*
 * g4x/ILK pre-enable: program the port registers, and for CPU eDP
 * (port A, which only exists on ilk+) set up and turn on the eDP PLL.
 */
2625 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2627 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2628 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2630 	intel_dp_prepare(encoder);
2632 	/* Only ilk+ has port A */
2633 	if (dport->port == PORT_A) {
2634 		ironlake_set_pll_cpu_edp(intel_dp);
2635 		ironlake_edp_pll_on(intel_dp);
/*
 * Logically disconnect this intel_dp from its VLV/CHV per-pipe panel
 * power sequencer: sync vdd off first, clear the PPS port-select
 * register, and mark pps_pipe invalid.  Clearing port select matters on
 * VLV (two sequencers selecting one port confuses the hw — see comment).
 */
2639 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2641 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2642 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2643 	enum pipe pipe = intel_dp->pps_pipe;
2644 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
	/* vdd must be off (synchronously) before releasing the sequencer. */
2646 	edp_panel_vdd_off_sync(intel_dp);
2649 	 * VLV seems to get confused when multiple power seqeuencers
2650 	 * have the same port selected (even if only one has power/vdd
2651 	 * enabled). The failure manifests as vlv_wait_port_ready() failing
2652 	 * CHV on the other hand doesn't seem to mind having the same port
2653 	 * selected in multiple power seqeuencers, but let's clear the
2654 	 * port select always when logically disconnecting a power sequencer
2657 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2658 		      pipe_name(pipe), port_name(intel_dig_port->port));
2659 	I915_WRITE(pp_on_reg, 0);
2660 	POSTING_READ(pp_on_reg);
2662 	intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Take the power sequencer of @pipe away from whichever eDP encoder is
 * currently using it, so the caller can claim it.  Walks the encoder
 * list, finds an eDP encoder with pps_pipe == pipe, warns if that
 * encoder is actually active, and detaches it.  pps_mutex must be held.
 */
2665 static void vlv_steal_power_sequencer(struct drm_device *dev,
2668 	struct drm_i915_private *dev_priv = dev->dev_private;
2669 	struct intel_encoder *encoder;
2671 	lockdep_assert_held(&dev_priv->pps_mutex);
	/* Only pipes A and B have power sequencers on VLV/CHV. */
2673 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2676 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2678 		struct intel_dp *intel_dp;
2681 		if (encoder->type != INTEL_OUTPUT_EDP)
2684 		intel_dp = enc_to_intel_dp(&encoder->base);
2685 		port = dp_to_dig_port(intel_dp)->port;
2687 		if (intel_dp->pps_pipe != pipe)
2690 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2691 			      pipe_name(pipe), port_name(port));
		/* Stealing from an active port indicates a driver bug. */
2693 		WARN(encoder->base.crtc,
2694 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2695 		     pipe_name(pipe), port_name(port));
2697 		/* make sure vdd is off before we steal it */
2698 		vlv_detach_power_sequencer(intel_dp);
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port:
 * no-op for non-eDP or if already bound to this pipe; otherwise detach
 * any previous sequencer we held, steal the target pipe's sequencer
 * from any other port, claim it, and (re)program the PPS registers.
 * pps_mutex must be held.
 */
2702 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2704 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2705 	struct intel_encoder *encoder = &intel_dig_port->base;
2706 	struct drm_device *dev = encoder->base.dev;
2707 	struct drm_i915_private *dev_priv = dev->dev_private;
2708 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2710 	lockdep_assert_held(&dev_priv->pps_mutex);
2712 	if (!is_edp(intel_dp))
	/* Already using the right pipe's sequencer — nothing to do. */
2715 	if (intel_dp->pps_pipe == crtc->pipe)
2719 	 * If another power sequencer was being used on this
2720 	 * port previously make sure to turn off vdd there while
2721 	 * we still have control of it.
2723 	if (intel_dp->pps_pipe != INVALID_PIPE)
2724 		vlv_detach_power_sequencer(intel_dp);
2727 	 * We may be stealing the power
2728 	 * sequencer from another port.
2730 	vlv_steal_power_sequencer(dev, crtc->pipe);
2732 	/* now it's all ours */
2733 	intel_dp->pps_pipe = crtc->pipe;
2735 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2736 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2738 	/* init power sequencer on this pipe and port */
2739 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2740 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV pre-enable: program per-lane PCS registers over DPIO sideband
 * (clock-related setup plus two fixed magic values), then run the full
 * common enable sequence.  NOTE(review): the lines between the
 * VLV_PCS01_DW8 read and the VLV_PCS_DW8 write (2757-2762) are elided
 * here — the value manipulation in between is not visible.
 */
2743 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2745 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2746 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2747 	struct drm_device *dev = encoder->base.dev;
2748 	struct drm_i915_private *dev_priv = dev->dev_private;
2749 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2750 	enum dpio_channel port = vlv_dport_to_channel(dport);
2751 	int pipe = intel_crtc->pipe;
2754 	mutex_lock(&dev_priv->sb_lock);
2756 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2763 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2764 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2765 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2767 	mutex_unlock(&dev_priv->sb_lock);
2769 	intel_enable_dp(encoder);
/*
 * VLV pre-PLL-enable: program the port registers, then put the TX lanes
 * into their default reset state and apply the inter-pair skew
 * workaround values via DPIO sideband writes.
 */
2772 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2774 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2775 	struct drm_device *dev = encoder->base.dev;
2776 	struct drm_i915_private *dev_priv = dev->dev_private;
2777 	struct intel_crtc *intel_crtc =
2778 		to_intel_crtc(encoder->base.crtc);
2779 	enum dpio_channel port = vlv_dport_to_channel(dport);
2780 	int pipe = intel_crtc->pipe;
2782 	intel_dp_prepare(encoder);
2784 	/* Program Tx lane resets to default */
2785 	mutex_lock(&dev_priv->sb_lock);
2786 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2787 			 DPIO_PCS_TX_LANE2_RESET |
2788 			 DPIO_PCS_TX_LANE1_RESET);
2789 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2790 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2791 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2792 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2793 				 DPIO_PCS_CLK_SOFT_RESET);
2795 	/* Fix up inter-pair skew failure */
2796 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2797 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2798 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2799 	mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV pre-enable: under sb_lock, hand TX FIFO reset sourcing to hw,
 * program per-lane latency ("upar") bits, program data-lane stagger
 * based on port clock, deassert the data-lane soft reset; then run the
 * common enable sequence and drop the second-common-lane override if it
 * was taken in chv_dp_pre_pll_enable.
 * NOTE(review): the stagger values assigned per port-clock tier
 * (2841/2843/2845/2847...) are elided from this excerpt, as are some
 * loop-body lines — confirm `stagger` is always initialized in the full
 * file before relying on it.
 */
2802 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2804 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2805 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2806 	struct drm_device *dev = encoder->base.dev;
2807 	struct drm_i915_private *dev_priv = dev->dev_private;
2808 	struct intel_crtc *intel_crtc =
2809 		to_intel_crtc(encoder->base.crtc);
2810 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2811 	int pipe = intel_crtc->pipe;
2812 	int data, i, stagger;
2815 	mutex_lock(&dev_priv->sb_lock);
2817 	/* allow hardware to manage TX FIFO reset source */
2818 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2819 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2820 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2822 	if (intel_crtc->config->lane_count > 2) {
2823 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2824 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2825 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2828 	/* Program Tx lane latency optimal setting*/
2829 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
2830 		/* Set the upar bit */
2831 		if (intel_crtc->config->lane_count == 1)
2834 			data = (i == 1) ? 0x0 : 0x1;
2835 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2836 				data << DPIO_UPAR_SHIFT);
2839 	/* Data lane stagger programming */
2840 	if (intel_crtc->config->port_clock > 270000)
2842 	else if (intel_crtc->config->port_clock > 135000)
2844 	else if (intel_crtc->config->port_clock > 67500)
2846 	else if (intel_crtc->config->port_clock > 33750)
2851 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2852 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2853 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2855 	if (intel_crtc->config->lane_count > 2) {
2856 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2857 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
2858 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2861 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2862 		       DPIO_LANESTAGGER_STRAP(stagger) |
2863 		       DPIO_LANESTAGGER_STRAP_OVRD |
2864 		       DPIO_TX1_STAGGER_MASK(0x1f) |
2865 		       DPIO_TX1_STAGGER_MULT(6) |
2866 		       DPIO_TX2_STAGGER_MULT(0));
2868 	if (intel_crtc->config->lane_count > 2) {
2869 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2870 			       DPIO_LANESTAGGER_STRAP(stagger) |
2871 			       DPIO_LANESTAGGER_STRAP_OVRD |
2872 			       DPIO_TX1_STAGGER_MASK(0x1f) |
2873 			       DPIO_TX1_STAGGER_MULT(7) |
2874 			       DPIO_TX2_STAGGER_MULT(5));
2877 	/* Deassert data lane reset */
2878 	chv_data_lane_soft_reset(encoder, false);
2880 	mutex_unlock(&dev_priv->sb_lock);
2882 	intel_enable_dp(encoder);
2884 	/* Second common lane will stay alive on its own now */
2885 	if (dport->release_cl2_override) {
2886 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2887 		dport->release_cl2_override = false;
/*
 * CHV pre-PLL-enable: program port registers, temporarily force the
 * second common lane alive (CH0 + pipe B case, recording whether we must
 * release it later), power up the lanes we'll use, then under sb_lock:
 * assert data-lane reset, program left/right clock distribution,
 * per-PCS clock-channel usage, and the common-lane clock channel.
 * NOTE(review): several conditional/else lines are elided in this
 * excerpt (e.g. the conditions selecting BUFLEFTENA vs BUFRIGHTENA and
 * USEDCLKCHANNEL set vs clear) — the &=~ / |= pairs below are
 * alternatives in the full file, not sequential.
 */
2891 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2893 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2894 	struct drm_device *dev = encoder->base.dev;
2895 	struct drm_i915_private *dev_priv = dev->dev_private;
2896 	struct intel_crtc *intel_crtc =
2897 		to_intel_crtc(encoder->base.crtc);
2898 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2899 	enum pipe pipe = intel_crtc->pipe;
2900 	unsigned int lane_mask =
2901 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2904 	intel_dp_prepare(encoder);
2907 	 * Must trick the second common lane into life.
2908 	 * Otherwise we can't even access the PLL.
2910 	if (ch == DPIO_CH0 && pipe == PIPE_B)
2911 		dport->release_cl2_override =
2912 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2914 	chv_phy_powergate_lanes(encoder, true, lane_mask);
2916 	mutex_lock(&dev_priv->sb_lock);
2918 	/* Assert data lane reset */
2919 	chv_data_lane_soft_reset(encoder, true);
2921 	/* program left/right clock distribution */
2922 	if (pipe != PIPE_B) {
2923 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2924 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2926 			val |= CHV_BUFLEFTENA1_FORCE;
2928 			val |= CHV_BUFRIGHTENA1_FORCE;
2929 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2931 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2932 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2934 			val |= CHV_BUFLEFTENA2_FORCE;
2936 			val |= CHV_BUFRIGHTENA2_FORCE;
2937 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2940 	/* program clock channel usage */
2941 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2942 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2944 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2946 		val |= CHV_PCS_USEDCLKCHANNEL;
2947 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2949 	if (intel_crtc->config->lane_count > 2) {
2950 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2951 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2953 			val &= ~CHV_PCS_USEDCLKCHANNEL;
2955 			val |= CHV_PCS_USEDCLKCHANNEL;
2956 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2960 	 * This a a bit weird since generally CL
2961 	 * matches the pipe, but here we need to
2962 	 * pick the CL based on the port.
2964 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2966 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2968 		val |= CHV_CMN_USEDCLKCHANNEL;
2969 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2971 	mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV post-PLL-disable: clear the left/right clock distribution force
 * bits for this pipe under sb_lock, then drop the lane power overrides —
 * leaving (per the comment below) at least one lane's power-down bit
 * clear so chv_powergate_phy_ch() still has something to power on.
 */
2974 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2976 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2977 	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2980 	mutex_lock(&dev_priv->sb_lock);
2982 	/* disable left/right clock distribution */
2983 	if (pipe != PIPE_B) {
2984 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2985 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2986 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2988 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2989 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2990 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2993 	mutex_unlock(&dev_priv->sb_lock);
2996 	 * Leave the power down bit cleared for at least one
2997 	 * lane so that chv_powergate_phy_ch() will power
2998 	 * on something when the channel is otherwise unused.
2999 	 * When the port is off and the override is removed
3000 	 * the lanes power down anyway, so otherwise it doesn't
3001 	 * really matter what the state of power down bits is
3004 	chv_phy_powergate_lanes(encoder, false, 0x0);
3008  * Native read with retry for link status and receiver capability reads for
3009  * cases where the sink may still be asleep.
3011  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3012  * supposed to retry 3 times per the spec.
/*
 * Returns the drm_dp_dpcd_read() result of the last attempt; the retry
 * exit condition and return path are elided from this excerpt.
 */
3015 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3016 			void *buffer, size_t size)
3022 	 * Sometime we just get the same incorrect byte repeated
3023 	 * over the entire buffer. Doing just one throw away read
3024 	 * initially seems to "solve" it.
	/* Throwaway read of DPCD_REV to wake/settle the sink's AUX. */
3026 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3028 	for (i = 0; i < 3; i++) {
3029 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3039  * Fetch AUX CH registers 0x202 - 0x207 which contain
3040  * link status information
/*
 * Returns true only if all DP_LINK_STATUS_SIZE bytes were read.  The
 * offset argument (presumably DP_LANE0_1_STATUS) is elided here.
 */
3043 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3045 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3048 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3051 /* These are source-specific values. */
/*
 * Maximum voltage swing the source hardware supports, chosen by
 * platform/port: BXT and VLV allow level 3; gen9 allows level 3 only for
 * low-vswing eDP on port A; gen7 port A caps at level 2; CPT non-A
 * allows level 3; everything else defaults to level 2.
 */
3053 intel_dp_voltage_max(struct intel_dp *intel_dp)
3055 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3056 	struct drm_i915_private *dev_priv = dev->dev_private;
3057 	enum port port = dp_to_dig_port(intel_dp)->port;
3059 	if (IS_BROXTON(dev))
3060 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3061 	else if (INTEL_INFO(dev)->gen >= 9) {
3062 		if (dev_priv->edp_low_vswing && port == PORT_A)
3063 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3064 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3065 	} else if (IS_VALLEYVIEW(dev))
3066 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3067 	else if (IS_GEN7(dev) && port == PORT_A)
3068 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3069 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3070 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3072 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing — higher swings allow less pre-emphasis.  Per-platform tables;
 * the final `else` chain member (pre-SNB / CPT default) is at 3129.
 * NOTE(review): some case labels fall through to a shared return in the
 * full file; `break;`s are not used here because each case returns.
 */
3076 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3078 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3079 	enum port port = dp_to_dig_port(intel_dp)->port;
3081 	if (INTEL_INFO(dev)->gen >= 9) {
3082 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3083 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3084 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3085 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3086 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3087 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3088 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3089 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3090 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3092 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3094 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3095 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3096 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3097 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3098 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3099 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3100 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3101 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3102 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3104 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3106 	} else if (IS_VALLEYVIEW(dev)) {
3107 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3108 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3109 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3110 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3111 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3112 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3113 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3114 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3116 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3118 	} else if (IS_GEN7(dev) && port == PORT_A) {
3119 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3120 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3121 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3122 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3123 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3124 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3126 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3129 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3130 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3131 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3132 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3133 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3134 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3135 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3136 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3138 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * Translate the first lane's DP training request (voltage swing +
 * pre-emphasis from intel_dp->train_set[0]) into VLV PHY register values
 * and program them over DPIO sideband.  Invalid swing/emphasis combos
 * are handled by elided default: cases (numbering gaps at 3175-3179 etc.)
 * — presumably returning 0 early; confirm in the full file.
 */
3143 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3145 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3146 	struct drm_i915_private *dev_priv = dev->dev_private;
3147 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3148 	struct intel_crtc *intel_crtc =
3149 		to_intel_crtc(dport->base.base.crtc);
3150 	unsigned long demph_reg_value, preemph_reg_value,
3151 		uniqtranscale_reg_value;
3152 	uint8_t train_set = intel_dp->train_set[0];
3153 	enum dpio_channel port = vlv_dport_to_channel(dport);
3154 	int pipe = intel_crtc->pipe;
3156 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3157 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3158 		preemph_reg_value = 0x0004000;
3159 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3160 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3161 			demph_reg_value = 0x2B405555;
3162 			uniqtranscale_reg_value = 0x552AB83A;
3164 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3165 			demph_reg_value = 0x2B404040;
3166 			uniqtranscale_reg_value = 0x5548B83A;
3168 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3169 			demph_reg_value = 0x2B245555;
3170 			uniqtranscale_reg_value = 0x5560B83A;
3172 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3173 			demph_reg_value = 0x2B405555;
3174 			uniqtranscale_reg_value = 0x5598DA3A;
3180 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3181 		preemph_reg_value = 0x0002000;
3182 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3183 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3184 			demph_reg_value = 0x2B404040;
3185 			uniqtranscale_reg_value = 0x5552B83A;
3187 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3188 			demph_reg_value = 0x2B404848;
3189 			uniqtranscale_reg_value = 0x5580B83A;
3191 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3192 			demph_reg_value = 0x2B404040;
3193 			uniqtranscale_reg_value = 0x55ADDA3A;
3199 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3200 		preemph_reg_value = 0x0000000;
3201 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3202 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3203 			demph_reg_value = 0x2B305555;
3204 			uniqtranscale_reg_value = 0x5570B83A;
3206 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3207 			demph_reg_value = 0x2B2B4040;
3208 			uniqtranscale_reg_value = 0x55ADDA3A;
3214 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3215 		preemph_reg_value = 0x0006000;
3216 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3217 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3218 			demph_reg_value = 0x1B405555;
3219 			uniqtranscale_reg_value = 0x55ADDA3A;
	/* Program the computed swing/emphasis values into the PHY. */
3229 	mutex_lock(&dev_priv->sb_lock);
3230 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3231 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3232 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3233 			 uniqtranscale_reg_value);
3234 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3235 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3236 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3237 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3238 	mutex_unlock(&dev_priv->sb_lock);
/*
 * True for the one swing/emphasis combo (max swing, no pre-emphasis)
 * that requires the CHV unique transition scale to be enabled.
 */
3243 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3245 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3246 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
/*
 * CHV counterpart of vlv_signal_levels(): map train_set[0] to deemphasis
 * and margin values, then under sb_lock run the full swing-calculation
 * sequence: clear calc-init, zero margins, program per-lane deemph and
 * margin (plus the always-programmed 0x9a uniq-trans scale), toggle the
 * unique-transition-scale enable per chv_need_uniq_trans_scale(), start
 * the swing calculation, and set LRC bypass.
 * NOTE(review): default: cases of the switches and the final return are
 * elided from this excerpt (numbering gaps at 3322-3330, 3418+).
 */
3249 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3251 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3252 	struct drm_i915_private *dev_priv = dev->dev_private;
3253 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3254 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3255 	u32 deemph_reg_value, margin_reg_value, val;
3256 	uint8_t train_set = intel_dp->train_set[0];
3257 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3258 	enum pipe pipe = intel_crtc->pipe;
3261 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3262 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3263 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3264 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3265 			deemph_reg_value = 128;
3266 			margin_reg_value = 52;
3268 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3269 			deemph_reg_value = 128;
3270 			margin_reg_value = 77;
3272 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3273 			deemph_reg_value = 128;
3274 			margin_reg_value = 102;
3276 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3277 			deemph_reg_value = 128;
3278 			margin_reg_value = 154;
3279 			/* FIXME extra to set for 1200 */
3285 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3286 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3287 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3288 			deemph_reg_value = 85;
3289 			margin_reg_value = 78;
3291 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3292 			deemph_reg_value = 85;
3293 			margin_reg_value = 116;
3295 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3296 			deemph_reg_value = 85;
3297 			margin_reg_value = 154;
3303 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3304 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3305 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3306 			deemph_reg_value = 64;
3307 			margin_reg_value = 104;
3309 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3310 			deemph_reg_value = 64;
3311 			margin_reg_value = 154;
3317 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3318 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3319 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3320 			deemph_reg_value = 43;
3321 			margin_reg_value = 154;
3331 	mutex_lock(&dev_priv->sb_lock);
3333 	/* Clear calc init */
3334 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3335 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3336 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3337 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3338 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3340 	if (intel_crtc->config->lane_count > 2) {
3341 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3342 		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3343 		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3344 		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3345 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	/* Zero the TX margins before programming the real values. */
3348 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3349 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3350 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3351 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3353 	if (intel_crtc->config->lane_count > 2) {
3354 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3355 		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3356 		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3357 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3360 	/* Program swing deemph */
3361 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3362 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3363 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3364 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3365 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3368 	/* Program swing margin */
3369 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3370 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3372 		val &= ~DPIO_SWING_MARGIN000_MASK;
3373 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3376 		 * Supposedly this value shouldn't matter when unique transition
3377 		 * scale is disabled, but in fact it does matter. Let's just
3378 		 * always program the same value and hope it's OK.
3380 		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3381 		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3383 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3387 	 * The document said it needs to set bit 27 for ch0 and bit 26
3388 	 * for ch1. Might be a typo in the doc.
3389 	 * For now, for this unique transition scale selection, set bit
3390 	 * 27 for ch0 and ch1.
3392 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3393 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3394 		if (chv_need_uniq_trans_scale(train_set))
3395 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3397 			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3398 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3401 	/* Start swing calculation */
3402 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3403 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3404 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3406 	if (intel_crtc->config->lane_count > 2) {
3407 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3408 		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3409 		val |= vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	/* LRC bypass, per the CHV programming sequence. */
3413 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3414 	val |= DPIO_LRC_BYPASS;
3415 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3417 	mutex_unlock(&dev_priv->sb_lock);
/*
 * Compute the next train_set from the sink's adjust requests: take the
 * per-lane maxima of requested voltage/pre-emphasis (the max-tracking
 * lines are elided at 3435-3441), clamp to source limits marking
 * MAX_*_REACHED when clamped, and write the same value to all 4 lanes.
 */
3423 intel_get_adjust_train(struct intel_dp *intel_dp,
3424 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
3429 	uint8_t voltage_max;
3430 	uint8_t preemph_max;
3432 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3433 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3434 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	/* Clamp to what the source hardware can actually drive. */
3442 	voltage_max = intel_dp_voltage_max(intel_dp);
3443 	if (v >= voltage_max)
3444 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3446 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3447 	if (p >= preemph_max)
3448 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3450 	for (lane = 0; lane < 4; lane++)
3451 		intel_dp->train_set[lane] = v | p;
/*
 * Gen4-style signal levels: map the DP spec's swing/emphasis request
 * bits to the legacy DP register's DP_VOLTAGE_* / DP_PRE_EMPHASIS_*
 * fields.  `break;` lines between cases are elided in this excerpt.
 */
3455 gen4_signal_levels(uint8_t train_set)
3457 	uint32_t signal_levels = 0;
3459 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3460 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3462 		signal_levels |= DP_VOLTAGE_0_4;
3464 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3465 		signal_levels |= DP_VOLTAGE_0_6;
3467 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3468 		signal_levels |= DP_VOLTAGE_0_8;
3470 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3471 		signal_levels |= DP_VOLTAGE_1_2;
3474 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3475 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3477 		signal_levels |= DP_PRE_EMPHASIS_0;
3479 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3480 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3482 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 		signal_levels |= DP_PRE_EMPHASIS_6;
3485 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3486 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3489 	return signal_levels;
3492 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map a combined swing|emphasis request to the SNB eDP link-train
 * register encoding; unsupported combinations log and fall back to the
 * 400-600mV/0dB setting.
 */
3494 gen6_edp_signal_levels(uint8_t train_set)
3496 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3497 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3498 	switch (signal_levels) {
3499 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3501 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3502 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3503 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3504 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3505 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3506 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3507 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3508 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3509 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3510 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3511 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3514 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3515 			      "0x%x\n", signal_levels);
3516 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3520 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Same mapping as the gen6 variant, but for IVB eDP register encodings.
 * Note the fallback is 500mV 0dB here (a level with no explicit case).
 */
3522 gen7_edp_signal_levels(uint8_t train_set)
3524 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3525 DP_TRAIN_PRE_EMPHASIS_MASK);
3526 switch (signal_levels) {
3527 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3528 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3530 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3531 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3532 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3535 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3537 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3539 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3540 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3542 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
/* Fallback for combinations with no IVB register encoding. */
3545 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3546 "0x%x\n", signal_levels);
3547 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3551 /* Properly updates "DP" with the correct signal levels. */
/*
 * Picks the platform-specific signal-level encoding (DDI, CHV, VLV,
 * gen7/gen6 eDP, or gen4) from train_set[0] and merges it into *DP
 * under the matching register mask.
 * NOTE(review): the platform if/else ladder here has lines elided
 * (e.g. the HAS_DDI branch head before line 3562 is missing).
 */
3553 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3556 enum port port = intel_dig_port->port;
3557 struct drm_device *dev = intel_dig_port->base.base.dev;
3558 uint32_t signal_levels, mask = 0;
3559 uint8_t train_set = intel_dp->train_set[0];
3562 signal_levels = ddi_signal_levels(intel_dp);
3564 if (IS_BROXTON(dev))
3567 mask = DDI_BUF_EMP_MASK;
3568 } else if (IS_CHERRYVIEW(dev)) {
3569 signal_levels = chv_signal_levels(intel_dp);
3570 } else if (IS_VALLEYVIEW(dev)) {
3571 signal_levels = vlv_signal_levels(intel_dp);
3572 } else if (IS_GEN7(dev) && port == PORT_A) {
3573 signal_levels = gen7_edp_signal_levels(train_set);
3574 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3575 } else if (IS_GEN6(dev) && port == PORT_A) {
3576 signal_levels = gen6_edp_signal_levels(train_set);
3577 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3579 signal_levels = gen4_signal_levels(train_set);
3580 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3584 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3586 DRM_DEBUG_KMS("Using vswing level %d\n",
3587 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3588 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3589 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3590 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Clear only the platform's signal-level field, then OR in the new value. */
3592 *DP = (*DP & ~mask) | signal_levels;
/*
 * Program the requested training pattern on both sides of the link:
 * source-side port register via _intel_dp_set_link_train()/MMIO write,
 * then sink-side DPCD (DP_TRAINING_PATTERN_SET plus, unless disabling,
 * DP_TRAINING_LANEx_SET in one AUX burst).
 */
3596 intel_dp_set_link_train(struct intel_dp *intel_dp,
3598 uint8_t dp_train_pat)
3600 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3601 struct drm_i915_private *dev_priv =
3602 to_i915(intel_dig_port->base.base.dev);
3603 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3606 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3608 I915_WRITE(intel_dp->output_reg, *DP);
3609 POSTING_READ(intel_dp->output_reg);
3611 buf[0] = dp_train_pat;
3612 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3613 DP_TRAINING_PATTERN_DISABLE) {
3614 /* don't write DP_TRAINING_LANEx_SET on disable */
3617 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3618 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3619 len = intel_dp->lane_count + 1;
3622 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
/*
 * (Re)start training from scratch: zero the cached vswing/pre-emphasis
 * values (unless a previously trained set is being reused), reprogram the
 * signal levels, and set the given training pattern.
 */
3629 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3630 uint8_t dp_train_pat)
3632 if (!intel_dp->train_set_valid)
3633 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3634 intel_dp_set_signal_levels(intel_dp, DP);
3635 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
/*
 * Mid-training adjustment: recompute train_set from the sink's link_status
 * request, apply it to the port register, and mirror it to the sink's
 * DP_TRAINING_LANE0_SET..LANEx_SET. Returns true when all lanes were
 * written successfully.
 */
3639 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3640 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3642 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3643 struct drm_i915_private *dev_priv =
3644 to_i915(intel_dig_port->base.base.dev);
3647 intel_get_adjust_train(intel_dp, link_status);
3648 intel_dp_set_signal_levels(intel_dp, DP);
3650 I915_WRITE(intel_dp->output_reg, *DP);
3651 POSTING_READ(intel_dp->output_reg);
3653 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3654 intel_dp->train_set, intel_dp->lane_count);
3656 return ret == intel_dp->lane_count;
/*
 * Switch the DDI transport to idle-pattern transmission and, except on
 * PORT_A (see comment below), wait for the hardware to report idle done.
 */
3659 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3662 struct drm_device *dev = intel_dig_port->base.base.dev;
3663 struct drm_i915_private *dev_priv = dev->dev_private;
3664 enum port port = intel_dig_port->port;
3670 val = I915_READ(DP_TP_CTL(port));
3671 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3672 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3673 I915_WRITE(DP_TP_CTL(port), val);
3676 * On PORT_A we can have only eDP in SST mode. There the only reason
3677 * we need to set idle transmission mode is to work around a HW issue
3678 * where we enable the pipe while not in idle link-training mode.
3679 * In this case there is requirement to wait for a minimum number of
3680 * idle patterns to be sent.
3685 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3687 DRM_ERROR("Timed out waiting for DP idle patterns\n")
3690 /* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training:
 *  - write link rate / lane count / 8b10b coding to the sink's DPCD,
 *  - start TPS1 with scrambling disabled,
 *  - loop: read link_status, adjust vswing/pre-emphasis as the sink
 *    requests, bailing out after 5 same-voltage tries or 5 full retries
 *    at max swing.
 * NOTE(review): loop head, brace and counter-increment lines are elided
 * in this chunk; control flow below is fragmentary.
 */
3692 intel_dp_start_link_train(struct intel_dp *intel_dp)
3694 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3695 struct drm_device *dev = encoder->dev;
3698 int voltage_tries, loop_tries;
3699 uint32_t DP = intel_dp->DP;
3700 uint8_t link_config[2];
3701 uint8_t link_bw, rate_select;
3704 intel_ddi_prepare_link_retrain(encoder);
3706 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3707 &link_bw, &rate_select);
3709 /* Write the link configuration data */
3710 link_config[0] = link_bw;
3711 link_config[1] = intel_dp->lane_count;
3712 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3713 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3714 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3715 if (intel_dp->num_sink_rates)
3716 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3720 link_config[1] = DP_SET_ANSI_8B10B;
3721 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3725 /* clock recovery */
3726 if (!intel_dp_reset_link_train(intel_dp, &DP,
3727 DP_TRAINING_PATTERN_1 |
3728 DP_LINK_SCRAMBLING_DISABLE)) {
3729 DRM_ERROR("failed to enable link training\n");
3737 uint8_t link_status[DP_LINK_STATUS_SIZE];
3739 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3740 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3741 DRM_ERROR("failed to get link status\n");
3745 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3746 DRM_DEBUG_KMS("clock recovery OK\n");
3751 * if we used previously trained voltage and pre-emphasis values
3752 * and we don't get clock recovery, reset link training values
3754 if (intel_dp->train_set_valid) {
3755 DRM_DEBUG_KMS("clock recovery not ok, reset");
3756 /* clear the flag as we are not reusing train set */
3757 intel_dp->train_set_valid = false;
3758 if (!intel_dp_reset_link_train(intel_dp, &DP,
3759 DP_TRAINING_PATTERN_1 |
3760 DP_LINK_SCRAMBLING_DISABLE)) {
3761 DRM_ERROR("failed to enable link training\n");
3767 /* Check to see if we've tried the max voltage */
3768 for (i = 0; i < intel_dp->lane_count; i++)
3769 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3771 if (i == intel_dp->lane_count) {
3773 if (loop_tries == 5) {
3774 DRM_ERROR("too many full retries, give up\n");
3777 intel_dp_reset_link_train(intel_dp, &DP,
3778 DP_TRAINING_PATTERN_1 |
3779 DP_LINK_SCRAMBLING_DISABLE);
3784 /* Check to see if we've tried the same voltage 5 times */
3785 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3787 if (voltage_tries == 5) {
3788 DRM_ERROR("too many voltage retries, give up\n");
3793 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3795 /* Update training set as requested by target */
3796 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3797 DRM_ERROR("failed to update link training\n");
/*
 * Channel-equalization phase: run TPS2 (or TPS3 for HBR2 / TPS3-capable
 * sinks), retrying and falling back to a fresh clock-recovery pass when
 * CR or EQ fails, then switch the port to idle-pattern transmission and
 * cache the working train_set in train_set_valid.
 * NOTE(review): loop head, tries/cr_tries increments and break lines are
 * elided in this chunk.
 */
3806 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3808 bool channel_eq = false;
3809 int tries, cr_tries;
3810 uint32_t DP = intel_dp->DP;
3811 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3813 /* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
3814 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
3815 training_pattern = DP_TRAINING_PATTERN_3;
3817 /* channel equalization */
3818 if (!intel_dp_set_link_train(intel_dp, &DP,
3820 DP_LINK_SCRAMBLING_DISABLE)) {
3821 DRM_ERROR("failed to start channel equalization\n");
3829 uint8_t link_status[DP_LINK_STATUS_SIZE];
3832 DRM_ERROR("failed to train DP, aborting\n");
3836 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3837 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3838 DRM_ERROR("failed to get link status\n");
3842 /* Make sure clock is still ok */
3843 if (!drm_dp_clock_recovery_ok(link_status,
3844 intel_dp->lane_count)) {
3845 intel_dp->train_set_valid = false;
3846 intel_dp_start_link_train(intel_dp);
3847 intel_dp_set_link_train(intel_dp, &DP,
3849 DP_LINK_SCRAMBLING_DISABLE);
3854 if (drm_dp_channel_eq_ok(link_status,
3855 intel_dp->lane_count)) {
3860 /* Try 5 times, then try clock recovery if that fails */
3862 intel_dp->train_set_valid = false;
3863 intel_dp_start_link_train(intel_dp);
3864 intel_dp_set_link_train(intel_dp, &DP,
3866 DP_LINK_SCRAMBLING_DISABLE);
3872 /* Update training set as requested by target */
3873 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3874 DRM_ERROR("failed to update link training\n");
3880 intel_dp_set_idle_link_train(intel_dp);
/* Remember this train_set so a future retrain can try reusing it. */
3885 intel_dp->train_set_valid = true;
3886 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
/* End training: tell source and sink to stop sending training patterns. */
3890 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3892 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3893 DP_TRAINING_PATTERN_DISABLE);
/*
 * Bring the (non-DDI) DP port down: first drop to the idle training
 * pattern, then clear DP_PORT_EN and audio, with an IBX transcoder-A
 * workaround (see comment below) and a panel power-down delay at the end.
 */
3897 intel_dp_link_down(struct intel_dp *intel_dp)
3899 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3900 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3901 enum port port = intel_dig_port->port;
3902 struct drm_device *dev = intel_dig_port->base.base.dev;
3903 struct drm_i915_private *dev_priv = dev->dev_private;
3904 uint32_t DP = intel_dp->DP;
/* DDI ports are torn down elsewhere; the port must still be enabled. */
3906 if (WARN_ON(HAS_DDI(dev)))
3909 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3912 DRM_DEBUG_KMS("\n");
/* The link-train field layout differs between CPT/gen7-A and the rest. */
3914 if ((IS_GEN7(dev) && port == PORT_A) ||
3915 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3916 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3917 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3919 if (IS_CHERRYVIEW(dev))
3920 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3922 DP &= ~DP_LINK_TRAIN_MASK;
3923 DP |= DP_LINK_TRAIN_PAT_IDLE;
3925 I915_WRITE(intel_dp->output_reg, DP);
3926 POSTING_READ(intel_dp->output_reg);
3928 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3929 I915_WRITE(intel_dp->output_reg, DP);
3930 POSTING_READ(intel_dp->output_reg);
3933 * HW workaround for IBX, we need to move the port
3934 * to transcoder A after disabling it to allow the
3935 * matching HDMI port to be enabled on transcoder A.
3937 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3938 /* always enable with pattern 1 (as per spec) */
3939 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3940 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3941 I915_WRITE(intel_dp->output_reg, DP);
3942 POSTING_READ(intel_dp->output_reg);
/* Disable again after the transcoder-A bounce (elided lines between). */
3945 I915_WRITE(intel_dp->output_reg, DP);
3946 POSTING_READ(intel_dp->output_reg);
3949 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capabilities, then derive:
 * PSR/PSR2 support (eDP), TPS3 usability, eDP 1.4 intermediate link
 * rates, and downstream-port info. Returns false on AUX failure, a
 * missing DPCD, or a failed downstream-port read.
 */
3953 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3955 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3956 struct drm_device *dev = dig_port->base.base.dev;
3957 struct drm_i915_private *dev_priv = dev->dev_private;
3960 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3961 sizeof(intel_dp->dpcd)) < 0)
3962 return false; /* aux transfer failed */
3964 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3966 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3967 return false; /* DPCD not present */
3969 /* Check if the panel supports PSR */
3970 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3971 if (is_edp(intel_dp)) {
3972 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3974 sizeof(intel_dp->psr_dpcd));
3975 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3976 dev_priv->psr.sink_support = true;
3977 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3980 if (INTEL_INFO(dev)->gen >= 9 &&
3981 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3982 uint8_t frame_sync_cap;
3984 dev_priv->psr.sink_support = true;
3985 intel_dp_dpcd_read_wake(&intel_dp->aux,
3986 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3987 &frame_sync_cap, 1);
3988 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3989 /* PSR2 needs frame sync as well */
3990 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3991 DRM_DEBUG_KMS("PSR2 %s on sink",
3992 dev_priv->psr.psr2_support ? "supported" : "not supported");
3996 /* Training Pattern 3 support, both source and sink */
3997 if (drm_dp_tps3_supported(intel_dp->dpcd) &&
3998 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3999 intel_dp->use_tps3 = true;
4000 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
4002 intel_dp->use_tps3 = false;
4004 /* Intermediate frequency support */
4005 if (is_edp(intel_dp) &&
4006 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4007 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4008 (rev >= 0x03)) { /* eDp v1.4 or higher */
4009 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4012 intel_dp_dpcd_read_wake(&intel_dp->aux,
4013 DP_SUPPORTED_LINK_RATES,
4015 sizeof(sink_rates));
4017 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4018 int val = le16_to_cpu(sink_rates[i]);
4023 /* Value read is in kHz while drm clock is saved in deca-kHz */
4024 intel_dp->sink_rates[i] = (val * 200) / 10;
4026 intel_dp->num_sink_rates = i;
4029 intel_dp_print_rates(intel_dp);
4031 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4032 DP_DWN_STRM_PORT_PRESENT))
4033 return true; /* native DP sink */
4035 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4036 return true; /* no per-port downstream info */
4038 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4039 intel_dp->downstream_ports,
4040 DP_MAX_DOWNSTREAM_PORTS) < 0)
4041 return false; /* downstream port status fetch failed */
/*
 * Debug helper: if the sink advertises OUI support, read and log the
 * sink and branch IEEE OUIs. Failures are silently ignored (log-only).
 */
4047 intel_dp_probe_oui(struct intel_dp *intel_dp)
4051 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4054 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4055 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4056 buf[0], buf[1], buf[2]);
4058 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4059 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4060 buf[0], buf[1], buf[2]);
/*
 * Probe the sink's MST capability (requires DPCD rev >= 1.2 and a set
 * DP_MST_CAP bit), update intel_dp->is_mst, propagate the result to the
 * MST topology manager, and return the resulting MST state.
 */
4064 intel_dp_probe_mst(struct intel_dp *intel_dp)
4068 if (!intel_dp->can_mst)
4071 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4074 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4075 if (buf[0] & DP_MST_CAP) {
4076 DRM_DEBUG_KMS("Sink is MST capable\n");
4077 intel_dp->is_mst = true;
4079 DRM_DEBUG_KMS("Sink is not MST capable\n");
4080 intel_dp->is_mst = false;
4084 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4085 return intel_dp->is_mst;
/*
 * Stop sink CRC generation: clear DP_TEST_SINK_START in the sink's
 * DP_TEST_SINK register, mark the local state stopped, and re-enable IPS
 * (which sink_crc_start disabled).
 */
4088 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4090 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4091 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4095 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4096 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4101 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4102 buf & ~DP_TEST_SINK_START) < 0) {
4103 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4108 intel_dp->sink_crc.started = false;
4110 hsw_enable_ips(intel_crtc);
/*
 * Start sink CRC generation: stop any previous run, verify the sink
 * advertises CRC support in DP_TEST_SINK_MISC, disable IPS (it would
 * perturb the CRC), then set DP_TEST_SINK_START.
 */
4114 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4116 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4117 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4121 if (intel_dp->sink_crc.started) {
4122 ret = intel_dp_sink_crc_stop(intel_dp);
4127 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4130 if (!(buf & DP_TEST_CRC_SUPPORTED))
/* Remember the sink's current CRC count so changes can be detected. */
4133 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4135 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4138 hsw_disable_ips(intel_crtc);
4140 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4141 buf | DP_TEST_SINK_START) < 0) {
/* Starting failed: restore IPS before erroring out. */
4142 hsw_enable_ips(intel_crtc);
4146 intel_dp->sink_crc.started = true;
/*
 * Fetch a 6-byte sink-computed CRC: start CRC generation, then poll once
 * per vblank until the sink's test counter advances and the CRC differs
 * from the previous read (up to a bounded number of attempts), caching
 * count+CRC for the next call. Stops CRC generation before returning.
 * NOTE(review): the do{ head, attempts initialisation and several braces
 * are elided in this chunk.
 */
4150 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4152 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4153 struct drm_device *dev = dig_port->base.base.dev;
4154 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4160 ret = intel_dp_sink_crc_start(intel_dp);
4165 intel_wait_for_vblank(dev, intel_crtc->pipe);
4167 if (drm_dp_dpcd_readb(&intel_dp->aux,
4168 DP_TEST_SINK_MISC, &buf) < 0) {
4172 count = buf & DP_TEST_COUNT_MASK;
4175 * Count might be reset during the loop. In this case
4176 * last known count needs to be reset as well.
4179 intel_dp->sink_crc.last_count = 0;
4181 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4186 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4187 !memcmp(intel_dp->sink_crc.last_crc, crc,
/* Retry while the counter hasn't moved or the CRC is unchanged. */
4190 } while (--attempts && (count == 0 || old_equal_new));
4192 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4193 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4195 if (attempts == 0) {
4196 if (old_equal_new) {
4197 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4199 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4206 intel_dp_sink_crc_stop(intel_dp);
/* Read the 1-byte sink IRQ vector; true iff exactly one byte was read. */
4211 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4213 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4214 DP_DEVICE_SERVICE_IRQ_VECTOR,
4215 sink_irq_vector, 1) == 1;
/* Read the 14-byte ESI (event status indicator) block used in MST mode. */
4219 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4223 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4225 sink_irq_vector, 14);
/* Compliance stub: link-training autotest is not implemented; ACK only. */
4232 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4234 uint8_t test_result = DP_TEST_ACK;
/* Compliance stub: video-pattern autotest is not implemented; NAK. */
4238 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4240 uint8_t test_result = DP_TEST_NAK;
/*
 * DP CTS EDID-read autotest: on NAK/DEFER/corruption request the failsafe
 * resolution; otherwise write the last EDID block's checksum back to the
 * sink and request the standard resolution. Sets compliance_test_active
 * so userspace (the test client) knows a test is in progress.
 */
4244 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4246 uint8_t test_result = DP_TEST_NAK;
4247 struct intel_connector *intel_connector = intel_dp->attached_connector;
4248 struct drm_connector *connector = &intel_connector->base;
4250 if (intel_connector->detect_edid == NULL ||
4251 connector->edid_corrupt ||
4252 intel_dp->aux.i2c_defer_count > 6) {
4253 /* Check EDID read for NACKs, DEFERs and corruption
4254 * (DP CTS 1.2 Core r1.1)
4255 * 4.2.2.4 : Failed EDID read, I2C_NAK
4256 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4257 * 4.2.2.6 : EDID corruption detected
4258 * Use failsafe mode for all cases
4260 if (intel_dp->aux.i2c_nack_count > 0 ||
4261 intel_dp->aux.i2c_defer_count > 0)
4262 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4263 intel_dp->aux.i2c_nack_count,
4264 intel_dp->aux.i2c_defer_count);
4265 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4267 struct edid *block = intel_connector->detect_edid;
4269 /* We have to write the checksum
4270 * of the last block read
4272 block += intel_connector->detect_edid->extensions;
4274 if (!drm_dp_dpcd_write(&intel_dp->aux,
4275 DP_TEST_EDID_CHECKSUM,
4278 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4280 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4281 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4284 /* Set test active flag here so userspace doesn't interrupt things */
4285 intel_dp->compliance_test_active = 1;
/* Compliance stub: PHY-pattern autotest is not implemented; NAK. */
4290 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4292 uint8_t test_result = DP_TEST_NAK;
/*
 * Automated-test dispatcher: reset the per-test compliance state and AUX
 * i2c error counters, read DP_TEST_REQUEST from the sink, run the matching
 * autotest handler, and write the ACK/NAK response back to the sink.
 */
4296 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4298 uint8_t response = DP_TEST_NAK;
4302 intel_dp->compliance_test_active = 0;
4303 intel_dp->compliance_test_type = 0;
4304 intel_dp->compliance_test_data = 0;
/* Reset i2c error counts; the EDID autotest inspects them. */
4306 intel_dp->aux.i2c_nack_count = 0;
4307 intel_dp->aux.i2c_defer_count = 0;
4309 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4311 DRM_DEBUG_KMS("Could not read test request from sink\n");
4316 case DP_TEST_LINK_TRAINING:
4317 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4318 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4319 response = intel_dp_autotest_link_training(intel_dp);
4321 case DP_TEST_LINK_VIDEO_PATTERN:
4322 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4323 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4324 response = intel_dp_autotest_video_pattern(intel_dp);
4326 case DP_TEST_LINK_EDID_READ:
4327 DRM_DEBUG_KMS("EDID test requested\n");
4328 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4329 response = intel_dp_autotest_edid(intel_dp);
4331 case DP_TEST_LINK_PHY_TEST_PATTERN:
4332 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4333 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4334 response = intel_dp_autotest_phy_pattern(intel_dp);
4337 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4342 status = drm_dp_dpcd_write(&intel_dp->aux,
4346 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * Service an MST short pulse: read the ESI block, retrain if channel EQ
 * has dropped while MST links are active, hand the ESI to the topology
 * manager, and ack the handled bits back to the sink (retrying reads up
 * to 3 times). If ESI reads fail, tear down MST and send a hotplug event.
 * NOTE(review): the loop head and several branch lines are elided here.
 */
4350 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4354 if (intel_dp->is_mst) {
4359 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4363 /* check link status - esi[10] = 0x200c */
4364 if (intel_dp->active_mst_links &&
4365 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4366 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4367 intel_dp_start_link_train(intel_dp);
4368 intel_dp_complete_link_train(intel_dp);
4369 intel_dp_stop_link_train(intel_dp);
4372 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4373 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4376 for (retry = 0; retry < 3; retry++) {
4378 wret = drm_dp_dpcd_write(&intel_dp->aux,
4379 DP_SINK_COUNT_ESI+1,
4386 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4388 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
/* ESI read failure: drop out of MST and notify userspace. */
4396 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4397 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4398 intel_dp->is_mst = false;
4399 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4400 /* send a hotplug event */
4401 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4408 * According to DP spec
* 1. Read DPCD
4411 * 2. Configure link according to Receiver Capabilities
4412 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4413 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-pulse handler for an SST link: if the encoder has an active CRTC,
 * read link_status and the DPCD, clear/log any sink IRQ, and retrain the
 * link when channel EQ has dropped. Caller must hold connection_mutex
 * (asserted below).
 */
4416 intel_dp_check_link_status(struct intel_dp *intel_dp)
4418 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4419 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4421 u8 link_status[DP_LINK_STATUS_SIZE];
4423 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4425 if (!intel_encoder->base.crtc)
4428 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4431 /* Try to read receiver status if the link appears to be up */
4432 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4436 /* Now read the DPCD to see if it's actually running */
4437 if (!intel_dp_get_dpcd(intel_dp)) {
4441 /* Try to read the source of the interrupt */
4442 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4443 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4444 /* Clear interrupt source */
4445 drm_dp_dpcd_writeb(&intel_dp->aux,
4446 DP_DEVICE_SERVICE_IRQ_VECTOR,
4449 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4450 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4451 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4452 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4455 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4456 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4457 intel_encoder->base.name);
4458 intel_dp_start_link_train(intel_dp);
4459 intel_dp_complete_link_train(intel_dp);
4460 intel_dp_stop_link_train(intel_dp);
4464 /* XXX this is probably wrong for multiple downstream ports */
/*
 * DPCD-based connection detection: native sinks are connected once the
 * DPCD reads; branch devices are probed via SINK_COUNT (if HPD-aware),
 * a gentle DDC probe, and finally the downstream port type, returning
 * unknown for unreliable port types and disconnected for broken ones.
 */
4465 static enum drm_connector_status
4466 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4468 uint8_t *dpcd = intel_dp->dpcd;
4471 if (!intel_dp_get_dpcd(intel_dp))
4472 return connector_status_disconnected;
4474 /* if there's no downstream port, we're done */
4475 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4476 return connector_status_connected;
4478 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4479 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4480 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4483 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4485 return connector_status_unknown;
4487 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4488 : connector_status_disconnected;
4491 /* If no HPD, poke DDC gently */
4492 if (drm_probe_ddc(&intel_dp->aux.ddc))
4493 return connector_status_connected;
4495 /* Well we tried, say unknown for unreliable port types */
4496 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4497 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4498 if (type == DP_DS_PORT_TYPE_VGA ||
4499 type == DP_DS_PORT_TYPE_NON_EDID)
4500 return connector_status_unknown;
4502 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4503 DP_DWN_STRM_PORT_TYPE_MASK;
4504 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4505 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4506 return connector_status_unknown;
4509 /* Anything else is out of spec, warn and ignore */
4510 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4511 return connector_status_disconnected;
/* eDP detect: defer to the panel/lid state; unknown means connected. */
4514 static enum drm_connector_status
4515 edp_detect(struct intel_dp *intel_dp)
4517 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4518 enum drm_connector_status status;
4520 status = intel_panel_detect(dev);
4521 if (status == connector_status_unknown)
4522 status = connector_status_connected;
/*
 * IBX PCH live hotplug state from SDEISR.
 * NOTE(review): the case labels between lines are elided in this chunk.
 */
4527 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4528 struct intel_digital_port *port)
4532 switch (port->port) {
4536 bit = SDE_PORTB_HOTPLUG;
4539 bit = SDE_PORTC_HOTPLUG;
4542 bit = SDE_PORTD_HOTPLUG;
4545 MISSING_CASE(port->port);
4549 return I915_READ(SDEISR) & bit;
/* CPT PCH live hotplug state from SDEISR (CPT bit layout). */
4552 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4553 struct intel_digital_port *port)
4557 switch (port->port) {
4561 bit = SDE_PORTB_HOTPLUG_CPT;
4564 bit = SDE_PORTC_HOTPLUG_CPT;
4567 bit = SDE_PORTD_HOTPLUG_CPT;
4570 MISSING_CASE(port->port);
4574 return I915_READ(SDEISR) & bit;
/* G4x live hotplug state from PORT_HOTPLUG_STAT. */
4577 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4578 struct intel_digital_port *port)
4582 switch (port->port) {
4584 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4587 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4590 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4593 MISSING_CASE(port->port);
4597 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* VLV live hotplug state from PORT_HOTPLUG_STAT (VLV bit layout). */
4600 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4601 struct intel_digital_port *port)
4605 switch (port->port) {
4607 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4610 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4613 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4616 MISSING_CASE(port->port);
4620 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* BXT live hotplug state from the DE port interrupt status register. */
4623 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4624 struct intel_digital_port *port)
4628 switch (port->port) {
4630 bit = BXT_DE_PORT_HP_DDIA;
4633 bit = BXT_DE_PORT_HP_DDIB;
4636 bit = BXT_DE_PORT_HP_DDIC;
4639 MISSING_CASE(port->port);
4643 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4647 * intel_digital_port_connected - is the specified port connected?
4648 * @dev_priv: i915 private structure
4649 * @port: the port to test
4651 * Return %true if @port is connected, %false otherwise.
/* Platform dispatch to the per-PCH/per-SoC live-status helpers above. */
4653 static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4654 struct intel_digital_port *port)
4656 if (HAS_PCH_IBX(dev_priv))
4657 return ibx_digital_port_connected(dev_priv, port);
4658 if (HAS_PCH_SPLIT(dev_priv))
4659 return cpt_digital_port_connected(dev_priv, port);
4660 else if (IS_BROXTON(dev_priv))
4661 return bxt_digital_port_connected(dev_priv, port);
4662 else if (IS_VALLEYVIEW(dev_priv))
4663 return vlv_digital_port_connected(dev_priv, port);
/* Everything else falls back to the g4x register layout. */
4665 return g4x_digital_port_connected(dev_priv, port);
/*
 * PCH-split detect: check the live hotplug status first, then fall
 * through to DPCD-based detection.
 */
4668 static enum drm_connector_status
4669 ironlake_dp_detect(struct intel_dp *intel_dp)
4671 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4672 struct drm_i915_private *dev_priv = dev->dev_private;
4673 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4675 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4676 return connector_status_disconnected;
4678 return intel_dp_detect_dpcd(intel_dp);
/*
 * Non-PCH detect: eDP is always treated as connected (modulo lid state);
 * otherwise check live hotplug status and then the DPCD.
 */
4681 static enum drm_connector_status
4682 g4x_dp_detect(struct intel_dp *intel_dp)
4684 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4687 /* Can't disconnect eDP, but you can close the lid... */
4688 if (is_edp(intel_dp)) {
4689 enum drm_connector_status status;
4691 status = intel_panel_detect(dev);
4692 if (status == connector_status_unknown)
4693 status = connector_status_connected;
4697 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4698 return connector_status_disconnected;
4700 return intel_dp_detect_dpcd(intel_dp);
/*
 * Return the connector's EDID: a duplicate of the cached copy when one
 * exists (NULL if the cache holds an error), otherwise a fresh DDC read.
 * Caller owns the returned EDID.
 */
4703 static struct edid *
4704 intel_dp_get_edid(struct intel_dp *intel_dp)
4706 struct intel_connector *intel_connector = intel_dp->attached_connector;
4708 /* use cached edid if we have one */
4709 if (intel_connector->edid) {
4711 if (IS_ERR(intel_connector->edid))
4714 return drm_edid_duplicate(intel_connector->edid);
4716 return drm_get_edid(&intel_connector->base,
4717 &intel_dp->aux.ddc);
/*
 * Fetch and cache the EDID in detect_edid, and derive has_audio from it
 * unless the user forced audio on/off via force_audio.
 */
4721 intel_dp_set_edid(struct intel_dp *intel_dp)
4723 struct intel_connector *intel_connector = intel_dp->attached_connector;
4726 edid = intel_dp_get_edid(intel_dp);
4727 intel_connector->detect_edid = edid;
4729 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4730 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4732 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Drop the cached EDID and the audio capability derived from it. */
4736 intel_dp_unset_edid(struct intel_dp *intel_dp)
4738 struct intel_connector *intel_connector = intel_dp->attached_connector;
4740 kfree(intel_connector->detect_edid);
4741 intel_connector->detect_edid = NULL;
4743 intel_dp->has_audio = false;
/*
 * Grab the display power domain for this port and return it so the
 * caller can hand it back to intel_dp_power_put() later.
 */
4746 static enum intel_display_power_domain
4747 intel_dp_power_get(struct intel_dp *dp)
4749 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4750 enum intel_display_power_domain power_domain;
4752 power_domain = intel_display_port_power_domain(encoder);
4753 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4755 return power_domain;
/* Release a power domain previously taken with intel_dp_power_get(). */
4759 intel_dp_power_put(struct intel_dp *dp,
4760 enum intel_display_power_domain power_domain)
4762 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4763 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector_funcs.detect hook: determine whether a sink is present,
 * refresh the cached EDID/audio state, and service any pending sink IRQs
 * (automated test requests, CP IRQs). Returns the connector status.
 * All detection work is done under the port's power domain reference.
 */
4766 static enum drm_connector_status
4767 intel_dp_detect(struct drm_connector *connector, bool force)
4769 struct intel_dp *intel_dp = intel_attached_dp(connector);
4770 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4771 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4772 struct drm_device *dev = connector->dev;
4773 enum drm_connector_status status;
4774 enum intel_display_power_domain power_domain;
4778 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4779 connector->base.id, connector->name);
/* Start from a clean slate; re-read the EDID below if connected. */
4780 intel_dp_unset_edid(intel_dp);
4782 if (intel_dp->is_mst) {
4783 /* MST devices are disconnected from a monitor POV */
4784 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4785 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4786 return connector_status_disconnected;
4789 power_domain = intel_dp_power_get(intel_dp);
4791 /* Can't disconnect eDP, but you can close the lid... */
4792 if (is_edp(intel_dp))
4793 status = edp_detect(intel_dp);
4794 else if (HAS_PCH_SPLIT(dev))
4795 status = ironlake_dp_detect(intel_dp);
4797 status = g4x_dp_detect(intel_dp);
4798 if (status != connector_status_connected)
4801 intel_dp_probe_oui(intel_dp);
4803 ret = intel_dp_probe_mst(intel_dp);
4805 /* if we are in MST mode then this connector
4806 won't appear connected or have anything with EDID on it */
4807 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4808 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4809 status = connector_status_disconnected;
4813 intel_dp_set_edid(intel_dp);
4815 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4816 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4817 status = connector_status_connected;
4819 /* Try to read the source of the interrupt */
4820 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4821 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4822 /* Clear interrupt source */
4823 drm_dp_dpcd_writeb(&intel_dp->aux,
4824 DP_DEVICE_SERVICE_IRQ_VECTOR,
4827 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4828 intel_dp_handle_test_request(intel_dp);
4829 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4830 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4834 intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector_funcs.force hook: re-read the EDID for a connector whose
 * status was forced by the user, skipping actual hotplug detection. Only
 * does work when the connector is currently marked connected.
 */
4839 intel_dp_force(struct drm_connector *connector)
4841 struct intel_dp *intel_dp = intel_attached_dp(connector);
4842 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4843 enum intel_display_power_domain power_domain;
4845 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4846 connector->base.id, connector->name);
4847 intel_dp_unset_edid(intel_dp);
4849 if (connector->status != connector_status_connected)
/* EDID read needs the port powered up. */
4852 power_domain = intel_dp_power_get(intel_dp);
4854 intel_dp_set_edid(intel_dp);
4856 intel_dp_power_put(intel_dp, power_domain);
4858 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4859 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector_helper_funcs.get_modes hook: populate the connector's
 * mode list from the EDID cached at detect time; for eDP without an
 * EDID, fall back to the panel's fixed mode. Returns the mode count.
 */
4862 static int intel_dp_get_modes(struct drm_connector *connector)
4864 struct intel_connector *intel_connector = to_intel_connector(connector);
4867 edid = intel_connector->detect_edid;
4869 int ret = intel_connector_update_modes(connector, edid);
4874 /* if eDP has no EDID, fall back to fixed mode */
4875 if (is_edp(intel_attached_dp(connector)) &&
4876 intel_connector->panel.fixed_mode) {
4877 struct drm_display_mode *mode;
4879 mode = drm_mode_duplicate(connector->dev,
4880 intel_connector->panel.fixed_mode);
4882 drm_mode_probed_add(connector, mode);
/*
 * Report whether the sink advertises audio support, based on the EDID
 * cached at detect time. Returns false when no EDID is cached.
 */
4891 intel_dp_detect_audio(struct drm_connector *connector)
4893 bool has_audio = false;
4896 edid = to_intel_connector(connector)->detect_edid;
4898 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector_funcs.set_property hook. Handles three properties:
 * force_audio (override audio detection), broadcast RGB (full/limited
 * color range selection), and the eDP scaling mode. If the effective
 * state changed and the encoder has a CRTC, the mode is restored so the
 * new setting takes effect immediately.
 */
4904 intel_dp_set_property(struct drm_connector *connector,
4905 struct drm_property *property,
4908 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4909 struct intel_connector *intel_connector = to_intel_connector(connector);
4910 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4911 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4914 ret = drm_object_property_set_value(&connector->base, property, val);
4918 if (property == dev_priv->force_audio_property) {
/* No-op if the forced audio setting did not change. */
4922 if (i == intel_dp->force_audio)
4925 intel_dp->force_audio = i;
4927 if (i == HDMI_AUDIO_AUTO)
4928 has_audio = intel_dp_detect_audio(connector);
4930 has_audio = (i == HDMI_AUDIO_ON);
4932 if (has_audio == intel_dp->has_audio)
4935 intel_dp->has_audio = has_audio;
4939 if (property == dev_priv->broadcast_rgb_property) {
4940 bool old_auto = intel_dp->color_range_auto;
4941 bool old_range = intel_dp->limited_color_range;
4944 case INTEL_BROADCAST_RGB_AUTO:
4945 intel_dp->color_range_auto = true;
4947 case INTEL_BROADCAST_RGB_FULL:
4948 intel_dp->color_range_auto = false;
4949 intel_dp->limited_color_range = false;
4951 case INTEL_BROADCAST_RGB_LIMITED:
4952 intel_dp->color_range_auto = false;
4953 intel_dp->limited_color_range = true;
/* Nothing to do if neither auto mode nor range changed. */
4959 if (old_auto == intel_dp->color_range_auto &&
4960 old_range == intel_dp->limited_color_range)
4966 if (is_edp(intel_dp) &&
4967 property == connector->dev->mode_config.scaling_mode_property) {
4968 if (val == DRM_MODE_SCALE_NONE) {
4969 DRM_DEBUG_KMS("no scaling not supported\n");
4973 if (intel_connector->panel.fitting_mode == val) {
4974 /* the eDP scaling property is not changed */
4977 intel_connector->panel.fitting_mode = val;
/* Re-apply the current mode so the property change takes effect. */
4985 if (intel_encoder->base.crtc)
4986 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * drm_connector_funcs.destroy hook: free the detect-time and cached
 * EDIDs, tear down the panel for eDP connectors, and clean up the DRM
 * connector object itself.
 */
4992 intel_dp_connector_destroy(struct drm_connector *connector)
4994 struct intel_connector *intel_connector = to_intel_connector(connector);
4996 kfree(intel_connector->detect_edid);
4998 if (!IS_ERR_OR_NULL(intel_connector->edid))
4999 kfree(intel_connector->edid);
5001 /* Can't call is_edp() since the encoder may have been destroyed
5003 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5004 intel_panel_fini(&intel_connector->panel);
5006 drm_connector_cleanup(connector);
/*
 * Encoder destroy hook: unregister the AUX channel, tear down MST state,
 * and for eDP make sure panel VDD is really off and the reboot notifier
 * is unregistered before freeing the digital port.
 */
5010 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5012 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5013 struct intel_dp *intel_dp = &intel_dig_port->dp;
5015 drm_dp_aux_unregister(&intel_dp->aux);
5016 intel_dp_mst_encoder_cleanup(intel_dig_port);
5017 if (is_edp(intel_dp)) {
5018 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5020 * vdd might still be enabled due to the delayed vdd off.
5021 * Make sure vdd is actually turned off here.
5024 edp_panel_vdd_off_sync(intel_dp);
5025 pps_unlock(intel_dp);
5027 if (intel_dp->edp_notifier.notifier_call) {
5028 unregister_reboot_notifier(&intel_dp->edp_notifier);
5029 intel_dp->edp_notifier.notifier_call = NULL;
5032 drm_encoder_cleanup(encoder);
5033 kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, flush the delayed VDD-off work and force panel
 * VDD off synchronously so we do not suspend with VDD still asserted.
 * Non-eDP encoders need no work here.
 */
5036 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5038 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5040 if (!is_edp(intel_dp))
5044 * vdd might still be enabled due to the delayed vdd off.
5045 * Make sure vdd is actually turned off here.
5047 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5049 edp_panel_vdd_off_sync(intel_dp);
5050 pps_unlock(intel_dp);
/*
 * Sanitize VDD state tracking at boot/resume: if the BIOS left panel VDD
 * enabled, take the matching power-domain reference and schedule the
 * delayed VDD off so software state matches hardware. Caller must hold
 * pps_mutex (asserted below).
 */
5053 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5055 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5056 struct drm_device *dev = intel_dig_port->base.base.dev;
5057 struct drm_i915_private *dev_priv = dev->dev_private;
5058 enum intel_display_power_domain power_domain;
5060 lockdep_assert_held(&dev_priv->pps_mutex);
5062 if (!edp_have_panel_vdd(intel_dp))
5066 * The VDD bit needs a power domain reference, so if the bit is
5067 * already enabled when we boot or resume, grab this reference and
5068 * schedule a vdd off, so we don't hold on to the reference
5071 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5072 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5073 intel_display_power_get(dev_priv, power_domain);
5075 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Encoder reset hook (boot/resume): for eDP only, re-read the power
 * sequencer assignment the BIOS may have changed (VLV) and sanitize the
 * VDD state tracking, all under the PPS lock.
 */
5078 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5080 struct intel_dp *intel_dp;
5082 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5085 intel_dp = enc_to_intel_dp(encoder);
5090 * Read out the current power sequencer assignment,
5091 * in case the BIOS did something with it.
5093 if (IS_VALLEYVIEW(encoder->dev))
5094 vlv_initial_power_sequencer_setup(intel_dp);
5096 intel_edp_panel_vdd_sanitize(intel_dp);
5098 pps_unlock(intel_dp);
/* Connector vtable: detection, property handling and atomic state ops. */
5101 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5102 .dpms = drm_atomic_helper_connector_dpms,
5103 .detect = intel_dp_detect,
5104 .force = intel_dp_force,
5105 .fill_modes = drm_helper_probe_single_connector_modes,
5106 .set_property = intel_dp_set_property,
5107 .atomic_get_property = intel_connector_atomic_get_property,
5108 .destroy = intel_dp_connector_destroy,
5109 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5110 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Connector helper vtable: mode probing and encoder selection. */
5113 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5114 .get_modes = intel_dp_get_modes,
5115 .mode_valid = intel_dp_mode_valid,
5116 .best_encoder = intel_best_encoder,
/* Encoder vtable: reset on boot/resume and teardown on destroy. */
5119 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5120 .reset = intel_dp_encoder_reset,
5121 .destroy = intel_dp_encoder_destroy,
/*
 * Handle a hot-plug detect pulse on a DP digital port. Long pulses mean
 * connect/disconnect (re-read DPCD, probe OUI/MST); short pulses mean a
 * sink IRQ (MST message or link status check). Long pulses on eDP are
 * ignored to avoid a vdd-off/hpd feedback loop. Returns an irqreturn
 * value indicating whether the pulse was handled.
 */
5125 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5127 struct intel_dp *intel_dp = &intel_dig_port->dp;
5128 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5129 struct drm_device *dev = intel_dig_port->base.base.dev;
5130 struct drm_i915_private *dev_priv = dev->dev_private;
5131 enum intel_display_power_domain power_domain;
5132 enum irqreturn ret = IRQ_NONE;
5134 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5135 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5137 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5139 * vdd off can generate a long pulse on eDP which
5140 * would require vdd on to handle it, and thus we
5141 * would end up in an endless cycle of
5142 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5144 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5145 port_name(intel_dig_port->port));
5149 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5150 port_name(intel_dig_port->port),
5151 long_hpd ? "long" : "short");
5153 power_domain = intel_display_port_power_domain(intel_encoder);
5154 intel_display_power_get(dev_priv, power_domain);
5157 /* indicate that we need to restart link training */
5158 intel_dp->train_set_valid = false;
5160 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5163 if (!intel_dp_get_dpcd(intel_dp)) {
5167 intel_dp_probe_oui(intel_dp);
5169 if (!intel_dp_probe_mst(intel_dp))
5173 if (intel_dp->is_mst) {
5174 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5178 if (!intel_dp->is_mst) {
5180 * we'll check the link status via the normal hot plug path later -
5181 * but for short hpds we should check it now
5183 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5184 intel_dp_check_link_status(intel_dp);
5185 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5193 /* if we were in MST mode, and device is not there get out of MST mode */
5194 if (intel_dp->is_mst) {
5195 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5196 intel_dp->is_mst = false;
5197 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5200 intel_display_power_put(dev_priv, power_domain);
5205 /* Return which DP Port should be selected for Transcoder DP control */
5207 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5209 struct drm_device *dev = crtc->dev;
5210 struct intel_encoder *intel_encoder;
5211 struct intel_dp *intel_dp;
/* Walk the encoders attached to this CRTC looking for a DP/eDP one
 * and return its output register. */
5213 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5214 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5216 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5217 intel_encoder->type == INTEL_OUTPUT_EDP)
5218 return intel_dp->output_reg;
5224 /* check the VBT to see whether the eDP is on DP-D port */
5225 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5227 struct drm_i915_private *dev_priv = dev->dev_private;
5228 union child_device_config *p_child;
/* Map DRM port enum to the VBT child-device DVO port codes. */
5230 static const short port_mapping[] = {
5231 [PORT_B] = PORT_IDPB,
5232 [PORT_C] = PORT_IDPC,
5233 [PORT_D] = PORT_IDPD,
/* No VBT child devices at all -> cannot be eDP by this check. */
5239 if (!dev_priv->vbt.child_dev_num)
5242 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5243 p_child = dev_priv->vbt.child_dev + i;
5245 if (p_child->common.dvo_port == port_mapping[port] &&
5246 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5247 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the user-visible connector properties: force-audio, broadcast
 * RGB, and (for eDP) the scaling mode, defaulting to aspect-preserving
 * scaling.
 */
5254 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5256 struct intel_connector *intel_connector = to_intel_connector(connector);
5258 intel_attach_force_audio_property(connector);
5259 intel_attach_broadcast_rgb_property(connector);
5260 intel_dp->color_range_auto = true;
5262 if (is_edp(intel_dp)) {
5263 drm_mode_create_scaling_mode_property(connector->dev);
5264 drm_object_attach_property(
5266 connector->dev->mode_config.scaling_mode_property,
5267 DRM_MODE_SCALE_ASPECT);
5268 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Seed the panel power-sequencing timestamps with the current jiffies so
 * the first power-on/backlight operations honour the required delays.
 */
5272 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5274 intel_dp->last_power_cycle = jiffies;
5275 intel_dp->last_power_on = jiffies;
5276 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the eDP panel power sequencing delays. Reads the current
 * hardware PPS register values and the VBT-provided delays, takes the
 * max of the two (falling back to the eDP spec upper limits when both
 * are zero), and stores the result in intel_dp->pps_delays plus the
 * per-phase delay fields. Register layout differs per platform (BXT /
 * PCH / VLV-CHV). Caller must hold pps_mutex (asserted below).
 */
5280 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5281 struct intel_dp *intel_dp)
5283 struct drm_i915_private *dev_priv = dev->dev_private;
5284 struct edp_power_seq cur, vbt, spec,
5285 *final = &intel_dp->pps_delays;
5286 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5287 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5289 lockdep_assert_held(&dev_priv->pps_mutex);
5291 /* already initialized? */
5292 if (final->t11_t12 != 0)
5295 if (IS_BROXTON(dev)) {
5297 * TODO: BXT has 2 sets of PPS registers.
5298 * Correct Register for Broxton need to be identified
5299 * using VBT. hardcoding for now
5301 pp_ctrl_reg = BXT_PP_CONTROL(0);
5302 pp_on_reg = BXT_PP_ON_DELAYS(0);
5303 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5304 } else if (HAS_PCH_SPLIT(dev)) {
5305 pp_ctrl_reg = PCH_PP_CONTROL;
5306 pp_on_reg = PCH_PP_ON_DELAYS;
5307 pp_off_reg = PCH_PP_OFF_DELAYS;
5308 pp_div_reg = PCH_PP_DIVISOR;
5310 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5312 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5313 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5314 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5315 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5318 /* Workaround: Need to write PP_CONTROL with the unlock key as
5319 * the very first thing. */
5320 pp_ctl = ironlake_get_pp_control(intel_dp);
5322 pp_on = I915_READ(pp_on_reg);
5323 pp_off = I915_READ(pp_off_reg);
5324 if (!IS_BROXTON(dev)) {
5325 I915_WRITE(pp_ctrl_reg, pp_ctl);
5326 pp_div = I915_READ(pp_div_reg);
5329 /* Pull timing values out of registers */
5330 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5331 PANEL_POWER_UP_DELAY_SHIFT;
5333 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5334 PANEL_LIGHT_ON_DELAY_SHIFT;
5336 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5337 PANEL_LIGHT_OFF_DELAY_SHIFT;
5339 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5340 PANEL_POWER_DOWN_DELAY_SHIFT;
5342 if (IS_BROXTON(dev)) {
5343 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5344 BXT_POWER_CYCLE_DELAY_SHIFT;
5346 cur.t11_t12 = (tmp - 1) * 1000;
5350 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5351 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5354 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5355 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5357 vbt = dev_priv->vbt.edp_pps;
5359 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5360 * our hw here, which are all in 100usec. */
5361 spec.t1_t3 = 210 * 10;
5362 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5363 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5364 spec.t10 = 500 * 10;
5365 /* This one is special and actually in units of 100ms, but zero
5366 * based in the hw (so we need to add 100 ms). But the sw vbt
5367 * table multiplies it with 1000 to make it in units of 100usec,
5369 spec.t11_t12 = (510 + 100) * 10;
5371 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5372 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5374 /* Use the max of the register settings and vbt. If both are
5375 * unset, fall back to the spec limits. */
5376 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5378 max(cur.field, vbt.field))
5379 assign_final(t1_t3);
5383 assign_final(t11_t12);
/* Convert 100us hardware units to milliseconds, rounding up. */
5386 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5387 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5388 intel_dp->backlight_on_delay = get_delay(t8);
5389 intel_dp->backlight_off_delay = get_delay(t9);
5390 intel_dp->panel_power_down_delay = get_delay(t10);
5391 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5394 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5395 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5396 intel_dp->panel_power_cycle_delay);
5398 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5399 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the panel power sequencer hardware registers from the delays
 * computed in intel_dp->pps_delays. Backlight delays are written as 1
 * because the driver waits for them manually. Also selects the PPS port
 * where the platform has port-select bits (VLV / IBX / CPT). Caller
 * must hold pps_mutex (asserted below).
 */
5403 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5404 struct intel_dp *intel_dp)
5406 struct drm_i915_private *dev_priv = dev->dev_private;
5407 u32 pp_on, pp_off, pp_div, port_sel = 0;
5408 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5409 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5410 enum port port = dp_to_dig_port(intel_dp)->port;
5411 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5413 lockdep_assert_held(&dev_priv->pps_mutex);
5415 if (IS_BROXTON(dev)) {
5417 * TODO: BXT has 2 sets of PPS registers.
5418 * Correct Register for Broxton need to be identified
5419 * using VBT. hardcoding for now
5421 pp_ctrl_reg = BXT_PP_CONTROL(0);
5422 pp_on_reg = BXT_PP_ON_DELAYS(0);
5423 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5425 } else if (HAS_PCH_SPLIT(dev)) {
5426 pp_on_reg = PCH_PP_ON_DELAYS;
5427 pp_off_reg = PCH_PP_OFF_DELAYS;
5428 pp_div_reg = PCH_PP_DIVISOR;
5430 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5432 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5433 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5434 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5438 * And finally store the new values in the power sequencer. The
5439 * backlight delays are set to 1 because we do manual waits on them. For
5440 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5441 * we'll end up waiting for the backlight off delay twice: once when we
5442 * do the manual sleep, and once when we disable the panel and wait for
5443 * the PP_STATUS bit to become zero.
5445 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5446 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5447 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5448 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5449 /* Compute the divisor for the pp clock, simply match the Bspec
5451 if (IS_BROXTON(dev)) {
5452 pp_div = I915_READ(pp_ctrl_reg);
5453 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5454 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5455 << BXT_POWER_CYCLE_DELAY_SHIFT);
5457 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5458 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5459 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5462 /* Haswell doesn't have any port selection bits for the panel
5463 * power sequencer any more. */
5464 if (IS_VALLEYVIEW(dev)) {
5465 port_sel = PANEL_PORT_SELECT_VLV(port);
5466 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5468 port_sel = PANEL_PORT_SELECT_DPA;
5470 port_sel = PANEL_PORT_SELECT_DPD;
5475 I915_WRITE(pp_on_reg, pp_on);
5476 I915_WRITE(pp_off_reg, pp_off);
/* BXT keeps the power-cycle delay in the control register instead of a
 * dedicated divisor register. */
5477 if (IS_BROXTON(dev))
5478 I915_WRITE(pp_ctrl_reg, pp_div)
5480 I915_WRITE(pp_div_reg, pp_div);
5482 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5483 I915_READ(pp_on_reg),
5484 I915_READ(pp_off_reg),
5486 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5487 I915_READ(pp_div_reg));
5491 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5493 * @refresh_rate: RR to be programmed
5495 * This function gets called when refresh rate (RR) has to be changed from
5496 * one frequency to another. Switches can be between high and low RR
5497 * supported by the panel or to any other RR based on media playback (in
5498 * this case, RR value needs to be passed from user space).
5500 * The caller of this function needs to take a lock on dev_priv->drrs.
/*
 * Switch the panel refresh rate (kernel-doc for this function is in the
 * comment block above). Validates the request, picks HIGH/LOW RR, then
 * programs either the M/N link values (gen8+, non-CHV) or the PIPECONF
 * RR-mode-switch bit (gen7+/VLV). Caller must hold dev_priv->drrs lock.
 */
5502 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5504 struct drm_i915_private *dev_priv = dev->dev_private;
5505 struct intel_encoder *encoder;
5506 struct intel_digital_port *dig_port = NULL;
5507 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5508 struct intel_crtc_state *config = NULL;
5509 struct intel_crtc *intel_crtc = NULL;
5511 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5513 if (refresh_rate <= 0) {
5514 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5518 if (intel_dp == NULL) {
5519 DRM_DEBUG_KMS("DRRS not supported.\n");
5524 * FIXME: This needs proper synchronization with psr state for some
5525 * platforms that cannot have PSR and DRRS enabled at the same time.
5528 dig_port = dp_to_dig_port(intel_dp);
5529 encoder = &dig_port->base;
5530 intel_crtc = to_intel_crtc(encoder->base.crtc);
5533 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5537 config = intel_crtc->config;
5539 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5540 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matches the downclock mode -> low refresh rate. */
5544 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5546 index = DRRS_LOW_RR;
5548 if (index == dev_priv->drrs.refresh_rate_type) {
5550 "DRRS requested for previously set RR...ignoring\n");
5554 if (!intel_crtc->active) {
5555 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5559 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5562 intel_dp_set_m_n(intel_crtc, M1_N1);
5565 intel_dp_set_m_n(intel_crtc, M2_N2);
5569 DRM_ERROR("Unsupported refreshrate type\n");
5571 } else if (INTEL_INFO(dev)->gen > 6) {
5572 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5573 val = I915_READ(reg);
5575 if (index > DRRS_HIGH_RR) {
5576 if (IS_VALLEYVIEW(dev))
5577 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5579 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5581 if (IS_VALLEYVIEW(dev))
5582 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5584 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5586 I915_WRITE(reg, val);
5589 dev_priv->drrs.refresh_rate_type = index;
5591 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5595 * intel_edp_drrs_enable - init drrs struct if supported
5596 * @intel_dp: DP struct
5598 * Initializes frontbuffer_bits and drrs.dp
/*
 * Enable DRRS for this eDP panel (kernel-doc in the block above): under
 * the DRRS mutex, record intel_dp as the active DRRS sink and clear the
 * busy-frontbuffer tracking. Bails out if the CRTC config lacks DRRS or
 * DRRS is already enabled.
 */
5600 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5602 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5603 struct drm_i915_private *dev_priv = dev->dev_private;
5604 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5605 struct drm_crtc *crtc = dig_port->base.base.crtc;
5606 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5608 if (!intel_crtc->config->has_drrs) {
5609 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5613 mutex_lock(&dev_priv->drrs.mutex);
5614 if (WARN_ON(dev_priv->drrs.dp)) {
5615 DRM_ERROR("DRRS already enabled\n");
5619 dev_priv->drrs.busy_frontbuffer_bits = 0;
5621 dev_priv->drrs.dp = intel_dp;
5624 mutex_unlock(&dev_priv->drrs.mutex);
5628 * intel_edp_drrs_disable - Disable DRRS
5629 * @intel_dp: DP struct
/*
 * Disable DRRS (kernel-doc in the block above): restore the panel's
 * fixed (high) refresh rate if we are currently downclocked, clear the
 * active DRRS sink, and cancel the pending downclock work.
 */
5632 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5634 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5635 struct drm_i915_private *dev_priv = dev->dev_private;
5636 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5637 struct drm_crtc *crtc = dig_port->base.base.crtc;
5638 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5640 if (!intel_crtc->config->has_drrs)
5643 mutex_lock(&dev_priv->drrs.mutex);
5644 if (!dev_priv->drrs.dp) {
5645 mutex_unlock(&dev_priv->drrs.mutex);
/* If we were in low RR, switch back to the fixed mode's rate. */
5649 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5650 intel_dp_set_drrs_state(dev_priv->dev,
5651 intel_dp->attached_connector->panel.
5652 fixed_mode->vrefresh);
5654 dev_priv->drrs.dp = NULL;
5655 mutex_unlock(&dev_priv->drrs.mutex);
5657 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed-work handler that downclocks the panel to the low refresh
 * rate after a period of screen idleness, unless frontbuffer activity
 * (busy_frontbuffer_bits) raced in since the work was scheduled.
 */
5660 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5662 struct drm_i915_private *dev_priv =
5663 container_of(work, typeof(*dev_priv), drrs.work.work);
5664 struct intel_dp *intel_dp;
5666 mutex_lock(&dev_priv->drrs.mutex);
5668 intel_dp = dev_priv->drrs.dp;
5674 * The delayed work can race with an invalidate hence we need to
/* Still-busy frontbuffers mean the screen is not idle: skip downclock. */
5678 if (dev_priv->drrs.busy_frontbuffer_bits)
5681 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5682 intel_dp_set_drrs_state(dev_priv->dev,
5683 intel_dp->attached_connector->panel.
5684 downclock_mode->vrefresh)
5687 mutex_unlock(&dev_priv->drrs.mutex);
5691 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5693 * @frontbuffer_bits: frontbuffer plane tracking bits
5695 * This function gets called every time rendering on the given planes starts.
5696 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5698 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Frontbuffer-invalidate hook (kernel-doc in the block above): rendering
 * has started on the given planes, so mark them busy and upclock to the
 * high refresh rate if we were downclocked.
 */
5700 void intel_edp_drrs_invalidate(struct drm_device *dev,
5701 unsigned frontbuffer_bits)
5703 struct drm_i915_private *dev_priv = dev->dev_private;
5704 struct drm_crtc *crtc;
5707 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5710 cancel_delayed_work(&dev_priv->drrs.work);
5712 mutex_lock(&dev_priv->drrs.mutex);
5713 if (!dev_priv->drrs.dp) {
5714 mutex_unlock(&dev_priv->drrs.mutex);
5718 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5719 pipe = to_intel_crtc(crtc)->pipe;
/* Only track bits belonging to the DRRS pipe. */
5721 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5722 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5724 /* invalidate means busy screen hence upclock */
5725 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5726 intel_dp_set_drrs_state(dev_priv->dev,
5727 dev_priv->drrs.dp->attached_connector->panel.
5728 fixed_mode->vrefresh);
5730 mutex_unlock(&dev_priv->drrs.mutex);
5734 * intel_edp_drrs_flush - Restart Idleness DRRS
5736 * @frontbuffer_bits: frontbuffer plane tracking bits
5738 * This function gets called every time rendering on the given planes has
5739 * completed or flip on a crtc is completed. So DRRS should be upclocked
5740 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5741 * if no other planes are dirty.
5743 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
/*
 * Frontbuffer-flush hook (kernel-doc in the block above): rendering on
 * the given planes finished, so clear their busy bits, upclock if we
 * were downclocked, and re-arm the 1s idleness timer once all planes on
 * the DRRS pipe are quiescent.
 */
5745 void intel_edp_drrs_flush(struct drm_device *dev,
5746 unsigned frontbuffer_bits)
5748 struct drm_i915_private *dev_priv = dev->dev_private;
5749 struct drm_crtc *crtc;
5752 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5755 cancel_delayed_work(&dev_priv->drrs.work);
5757 mutex_lock(&dev_priv->drrs.mutex);
5758 if (!dev_priv->drrs.dp) {
5759 mutex_unlock(&dev_priv->drrs.mutex);
5763 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5764 pipe = to_intel_crtc(crtc)->pipe;
5766 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5767 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5769 /* flush means busy screen hence upclock */
5770 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5771 intel_dp_set_drrs_state(dev_priv->dev,
5772 dev_priv->drrs.dp->attached_connector->panel.
5773 fixed_mode->vrefresh);
5776 * flush also means no more activity hence schedule downclock, if all
5777 * other fbs are quiescent too
5779 if (!dev_priv->drrs.busy_frontbuffer_bits)
5780 schedule_delayed_work(&dev_priv->drrs.work,
5781 msecs_to_jiffies(1000));
5782 mutex_unlock(&dev_priv->drrs.mutex);
5786 * DOC: Display Refresh Rate Switching (DRRS)
5788 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5789 * which enables switching between low and high refresh rates,
5790 * dynamically, based on the usage scenario. This feature is applicable
5791 * for internal panels.
5793 * Indication that the panel supports DRRS is given by the panel EDID, which
5794 * would list multiple refresh rates for one resolution.
5796 * DRRS is of 2 types - static and seamless.
5797 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5798 * (may appear as a blink on screen) and is used in dock-undock scenario.
5799 * Seamless DRRS involves changing RR without any visual effect to the user
5800 * and can be used during normal system usage. This is done by programming
5801 * certain registers.
5803 * Support for static/seamless DRRS may be indicated in the VBT based on
5804 * inputs from the panel spec.
5806 * DRRS saves power by switching to low RR based on usage scenarios.
5809 * The implementation is based on frontbuffer tracking implementation.
5810 * When there is a disturbance on the screen triggered by user activity or a
5811 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5812 * When there is no movement on screen, after a timeout of 1 second, a switch
5813 * to low RR is made.
5814 * For integration with frontbuffer tracking code,
5815 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5817 * DRRS can be further extended to support other internal panels and also
5818 * the scenario of video playback wherein RR is set based on the rate
5819 * requested by userspace.
5823 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5824 * @intel_connector: eDP connector
5825 * @fixed_mode: preferred mode of panel
5827 * This function is called only once at driver load to initialize basic
5831 * Downclock mode if panel supports it, else return NULL.
5832 * DRRS support is determined by the presence of downclock mode (apart
5833 * from VBT setting).
/*
 * One-time DRRS setup (kernel-doc in the block above): initialize the
 * downclock work and mutex, then enable DRRS only when the platform is
 * gen7+, the VBT allows seamless DRRS, and the panel EDID provides a
 * downclock mode. Returns the downclock mode or NULL.
 */
5835 static struct drm_display_mode *
5836 intel_dp_drrs_init(struct intel_connector *intel_connector,
5837 struct drm_display_mode *fixed_mode)
5839 struct drm_connector *connector = &intel_connector->base;
5840 struct drm_device *dev = connector->dev;
5841 struct drm_i915_private *dev_priv = dev->dev_private;
5842 struct drm_display_mode *downclock_mode = NULL;
5844 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5845 mutex_init(&dev_priv->drrs.mutex);
5847 if (INTEL_INFO(dev)->gen <= 6) {
5848 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5852 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5853 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5857 downclock_mode = intel_find_panel_downclock
5858 (dev, fixed_mode, connector);
5860 if (!downclock_mode) {
5861 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5865 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5867 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5868 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5869 return downclock_mode;
/*
 * eDP-specific connector setup: probe the DPCD, cache the EDID, pick a
 * panel fixed mode (EDID preferred, VBT fallback), hook up reboot
 * handling on VLV, and initialize panel/backlight state.  Returns false
 * (visible via the caller's unwind path) when the panel is absent or a
 * "ghost" whose DPCD read fails.
 * NOTE(review): several returns, else-arms and closing braces are
 * elided from this excerpt; comments describe only what is visible.
 */
5872 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5873 struct intel_connector *intel_connector)
5875 struct drm_connector *connector = &intel_connector->base;
5876 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5877 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5878 struct drm_device *dev = intel_encoder->base.dev;
5879 struct drm_i915_private *dev_priv = dev->dev_private;
5880 struct drm_display_mode *fixed_mode = NULL;
5881 struct drm_display_mode *downclock_mode = NULL;
5883 struct drm_display_mode *scan;
5885 enum pipe pipe = INVALID_PIPE;
/* Nothing to do for external (non-eDP) DP connectors. */
5887 if (!is_edp(intel_dp))
/* Account for any VDD state left enabled by the BIOS. */
5891 intel_edp_panel_vdd_sanitize(intel_dp);
5892 pps_unlock(intel_dp);
5894 /* Cache DPCD and EDID for edp. */
5895 has_dpcd = intel_dp_get_dpcd(intel_dp);
/* DPCD rev 1.1+ sinks may support link training without the AUX
 * handshake; record that capability. */
5898 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5899 dev_priv->no_aux_handshake =
5900 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5901 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5903 /* if this fails, presume the device is a ghost */
5904 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5908 /* We now know it's not a ghost, init power sequence regs. */
5910 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5911 pps_unlock(intel_dp);
/* Read the panel EDID under the mode_config lock.  On failure an
 * ERR_PTR is stored so later code can tell "bad EDID" (-EINVAL) from
 * "no EDID" (-ENOENT). */
5913 mutex_lock(&dev->mode_config.mutex);
5914 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5916 if (drm_add_edid_modes(connector, edid)) {
5917 drm_mode_connector_update_edid_property(connector,
5919 drm_edid_to_eld(connector, edid);
5922 edid = ERR_PTR(-EINVAL);
5925 edid = ERR_PTR(-ENOENT);
5927 intel_connector->edid = edid;
5929 /* prefer fixed mode from EDID if available */
5930 list_for_each_entry(scan, &connector->probed_modes, head) {
5931 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5932 fixed_mode = drm_mode_duplicate(dev, scan);
/* DRRS is only probed for the EDID-preferred fixed mode. */
5933 downclock_mode = intel_dp_drrs_init(
5934 intel_connector, fixed_mode);
5939 /* fallback to VBT if available for eDP */
5940 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5941 fixed_mode = drm_mode_duplicate(dev,
5942 dev_priv->vbt.lfp_lvds_vbt_mode);
5944 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5946 mutex_unlock(&dev->mode_config.mutex);
/* VLV: make sure the panel is powered down cleanly across reboot. */
5948 if (IS_VALLEYVIEW(dev)) {
5949 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5950 register_reboot_notifier(&intel_dp->edp_notifier);
5953 * Figure out the current pipe for the initial backlight setup.
5954 * If the current pipe isn't valid, try the PPS pipe, and if that
5955 * fails just assume pipe A.
5957 if (IS_CHERRYVIEW(dev))
5958 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5960 pipe = PORT_TO_PIPE(intel_dp->DP);
5962 if (pipe != PIPE_A && pipe != PIPE_B)
5963 pipe = intel_dp->pps_pipe;
5965 if (pipe != PIPE_A && pipe != PIPE_B)
5968 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
/* Register the fixed/downclock modes and bring up backlight control. */
5972 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5973 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5974 intel_panel_setup_backlight(connector, pipe);
/*
 * Wire up an intel_connector for a (e)DP digital port: install the
 * platform-specific AUX vfuncs, register the DRM connector, pick the
 * hotplug pin, initialize the panel power sequencer and AUX channel,
 * set up MST where supported, and run the eDP panel probe — unwinding
 * the connector when that probe fails.
 * NOTE(review): the return-type line, some else-arms, returns and
 * closing braces are elided from this excerpt — confirm against the
 * full file.
 */
5980 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5981 struct intel_connector *intel_connector)
5983 struct drm_connector *connector = &intel_connector->base;
5984 struct intel_dp *intel_dp = &intel_dig_port->dp;
5985 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5986 struct drm_device *dev = intel_encoder->base.dev;
5987 struct drm_i915_private *dev_priv = dev->dev_private;
5988 enum port port = intel_dig_port->port;
5991 intel_dp->pps_pipe = INVALID_PIPE;
5993 /* intel_dp vfuncs */
/* Pick the AUX clock divider callback for this platform generation. */
5994 if (INTEL_INFO(dev)->gen >= 9)
5995 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5996 else if (IS_VALLEYVIEW(dev))
5997 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5998 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5999 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6000 else if (HAS_PCH_SPLIT(dev))
6001 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6003 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
/* Gen9+ builds the AUX_CTL value differently from older parts. */
6005 if (INTEL_INFO(dev)->gen >= 9)
6006 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6008 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6010 /* Preserve the current hw state. */
6011 intel_dp->DP = I915_READ(intel_dp->output_reg);
6012 intel_dp->attached_connector = intel_connector;
6014 if (intel_dp_is_edp(dev, port))
6015 type = DRM_MODE_CONNECTOR_eDP;
6017 type = DRM_MODE_CONNECTOR_DisplayPort;
6020 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6021 * for DP the encoder type can be set by the caller to
6022 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6024 if (type == DRM_MODE_CONNECTOR_eDP)
6025 intel_encoder->type = INTEL_OUTPUT_EDP;
6027 /* eDP only on port B and/or C on vlv/chv */
6028 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6029 port != PORT_B && port != PORT_C))
6032 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6033 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6036 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6037 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6039 connector->interlace_allowed = true;
6040 connector->doublescan_allowed = 0;
/* Deferred VDD-off work lets AUX transactions share one panel VDD
 * enable instead of toggling it per access. */
6042 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6043 edp_panel_vdd_work);
6045 intel_connector_attach_encoder(intel_connector, intel_encoder);
6046 drm_connector_register(connector);
/* DDI platforms read back connector hw state via a different helper. */
6049 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6051 intel_connector->get_hw_state = intel_connector_get_hw_state;
6052 intel_connector->unregister = intel_dp_connector_unregister;
6054 /* Set up the hotplug pin. */
6057 intel_encoder->hpd_pin = HPD_PORT_A;
6060 intel_encoder->hpd_pin = HPD_PORT_B;
/* Broxton pre-B0 steppings use the port B pin for port A HPD. */
6061 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6062 intel_encoder->hpd_pin = HPD_PORT_A;
6065 intel_encoder->hpd_pin = HPD_PORT_C;
6068 intel_encoder->hpd_pin = HPD_PORT_D;
/* eDP panels need power-sequencer state ready before any AUX use. */
6074 if (is_edp(intel_dp)) {
6076 intel_dp_init_panel_power_timestamps(intel_dp);
6077 if (IS_VALLEYVIEW(dev))
6078 vlv_initial_power_sequencer_setup(intel_dp);
6080 intel_dp_init_panel_power_sequencer(dev, intel_dp);
6081 pps_unlock(intel_dp);
6084 intel_dp_aux_init(intel_dp, intel_connector);
6086 /* init MST on ports that can support it */
6087 if (HAS_DP_MST(dev) &&
6088 (port == PORT_B || port == PORT_C || port == PORT_D))
6089 intel_dp_mst_encoder_init(intel_dig_port,
6090 intel_connector->base.base.id);
/* eDP probe failed (absent/ghost panel): unwind AUX, pending VDD work
 * and the DRM connector registered above. */
6092 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6093 drm_dp_aux_unregister(&intel_dp->aux);
6094 if (is_edp(intel_dp)) {
6095 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6097 * vdd might still be enabled do to the delayed vdd off.
6098 * Make sure vdd is actually turned off here.
6101 edp_panel_vdd_off_sync(intel_dp);
6102 pps_unlock(intel_dp);
6104 drm_connector_unregister(connector);
6105 drm_connector_cleanup(connector);
6109 intel_dp_add_properties(intel_dp, connector);
6111 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6112 * 0xd. Failure to do so will result in spurious interrupts being
6113 * generated on the port when a cable is not attached.
6115 if (IS_G4X(dev) && !IS_GM45(dev)) {
6116 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6117 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6120 i915_debugfs_connector_add(connector);
/*
 * Top-level DP output registration: allocate the digital port and
 * connector for the DP output at @output_reg on @port, install the
 * platform-specific encoder hooks (CHV / VLV / g4x+ilk variants), and
 * hand off to intel_dp_init_connector().  Frees both allocations if
 * connector init fails.
 * NOTE(review): error-path returns and some braces are elided from
 * this excerpt — confirm against the full file.
 */
6126 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6128 struct drm_i915_private *dev_priv = dev->dev_private;
6129 struct intel_digital_port *intel_dig_port;
6130 struct intel_encoder *intel_encoder;
6131 struct drm_encoder *encoder;
6132 struct intel_connector *intel_connector;
6134 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6135 if (!intel_dig_port)
6138 intel_connector = intel_connector_alloc();
6139 if (!intel_connector) {
/* Connector alloc failed: release the port allocated above. */
6140 kfree(intel_dig_port);
6144 intel_encoder = &intel_dig_port->base;
6145 encoder = &intel_encoder->base;
6147 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6148 DRM_MODE_ENCODER_TMDS);
6150 intel_encoder->compute_config = intel_dp_compute_config;
6151 intel_encoder->disable = intel_disable_dp;
6152 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6153 intel_encoder->get_config = intel_dp_get_config;
6154 intel_encoder->suspend = intel_dp_encoder_suspend;
/* CHV and VLV have their own PLL/PHY enable sequences; other
 * platforms use the g4x hooks, with an ilk post-disable on Gen5+. */
6155 if (IS_CHERRYVIEW(dev)) {
6156 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6157 intel_encoder->pre_enable = chv_pre_enable_dp;
6158 intel_encoder->enable = vlv_enable_dp;
6159 intel_encoder->post_disable = chv_post_disable_dp;
6160 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6161 } else if (IS_VALLEYVIEW(dev)) {
6162 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6163 intel_encoder->pre_enable = vlv_pre_enable_dp;
6164 intel_encoder->enable = vlv_enable_dp;
6165 intel_encoder->post_disable = vlv_post_disable_dp;
6167 intel_encoder->pre_enable = g4x_pre_enable_dp;
6168 intel_encoder->enable = g4x_enable_dp;
6169 if (INTEL_INFO(dev)->gen >= 5)
6170 intel_encoder->post_disable = ilk_post_disable_dp;
6173 intel_dig_port->port = port;
6174 intel_dig_port->dp.output_reg = output_reg;
6176 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/* Restrict which pipes this encoder may be cloned/driven on. */
6177 if (IS_CHERRYVIEW(dev)) {
6179 intel_encoder->crtc_mask = 1 << 2;
6181 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6183 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6185 intel_encoder->cloneable = 0;
/* Route long/short HPD pulses for this port to the DP handler. */
6187 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6188 dev_priv->hotplug.irq_port[port] = intel_dig_port;
/* Connector init failed: tear down the encoder and free everything. */
6190 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6191 drm_encoder_cleanup(encoder);
6192 kfree(intel_dig_port);
6193 kfree(intel_connector);
/*
 * Suspend the MST topology manager on every DP port that currently has
 * MST active, so sink topology state can be restored on resume.
 * Non-DP ports and ports without MST capability are skipped.
 */
6197 void intel_dp_mst_suspend(struct drm_device *dev)
6199 struct drm_i915_private *dev_priv = dev->dev_private;
/* Walk all hotplug IRQ ports; empty slots are NULL. */
6203 for (i = 0; i < I915_MAX_PORTS; i++) {
6204 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6205 if (!intel_dig_port)
6208 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6209 if (!intel_dig_port->dp.can_mst)
/* Only suspend managers that were actually driving MST. */
6211 if (intel_dig_port->dp.is_mst)
6212 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6217 void intel_dp_mst_resume(struct drm_device *dev)
6219 struct drm_i915_private *dev_priv = dev->dev_private;
6222 for (i = 0; i < I915_MAX_PORTS; i++) {
6223 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6224 if (!intel_dig_port)
6226 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6229 if (!intel_dig_port->dp.can_mst)
6232 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6234 intel_dp_check_mst_status(&intel_dig_port->dp);