2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp *intel_dp)
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118 return intel_dig_port->base.base.dev;
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
134 intel_dp_max_link_bw(struct intel_dp *intel_dp)
136 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138 switch (max_link_bw) {
139 case DP_LINK_BW_1_62:
144 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 max_link_bw = DP_LINK_BW_1_62;
152 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
155 struct drm_device *dev = intel_dig_port->base.base.dev;
156 u8 source_max, sink_max;
159 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
160 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165 return min(source_max, sink_max);
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 * 270000 * 1 * 8 / 10 == 216000
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
186 intel_dp_link_required(int pixel_clock, int bpp)
188 return (pixel_clock * bpp + 9) / 10;
192 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194 return (max_link_clock * max_lanes * 8) / 10;
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
201 struct intel_dp *intel_dp = intel_attached_dp(connector);
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
207 if (is_edp(intel_dp) && fixed_mode) {
208 if (mode->hdisplay > fixed_mode->hdisplay)
211 if (mode->vdisplay > fixed_mode->vdisplay)
214 target_clock = fixed_mode->clock;
217 max_link_clock = intel_dp_max_link_rate(intel_dp);
218 max_lanes = intel_dp_max_lane_count(intel_dp);
220 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
221 mode_rate = intel_dp_link_required(target_clock, 18);
223 if (mode_rate > max_rate)
224 return MODE_CLOCK_HIGH;
226 if (mode->clock < 10000)
227 return MODE_CLOCK_LOW;
229 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
230 return MODE_H_ILLEGAL;
235 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
242 for (i = 0; i < src_bytes; i++)
243 v |= ((uint32_t) src[i]) << ((3-i) * 8);
247 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
252 for (i = 0; i < dst_bytes; i++)
253 dst[i] = src >> ((3-i) * 8);
256 /* hrawclock is 1/4 the FSB frequency */
258 intel_hrawclk(struct drm_device *dev)
260 struct drm_i915_private *dev_priv = dev->dev_private;
263 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
264 if (IS_VALLEYVIEW(dev))
267 clkcfg = I915_READ(CLKCFG);
268 switch (clkcfg & CLKCFG_FSB_MASK) {
277 case CLKCFG_FSB_1067:
279 case CLKCFG_FSB_1333:
281 /* these two are just a guess; one of them might be right */
282 case CLKCFG_FSB_1600:
283 case CLKCFG_FSB_1600_ALT:
291 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
292 struct intel_dp *intel_dp);
294 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
295 struct intel_dp *intel_dp);
297 static void pps_lock(struct intel_dp *intel_dp)
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct intel_encoder *encoder = &intel_dig_port->base;
301 struct drm_device *dev = encoder->base.dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
303 enum intel_display_power_domain power_domain;
306 * See vlv_power_sequencer_reset() why we need
307 * a power domain reference here.
309 power_domain = intel_display_port_power_domain(encoder);
310 intel_display_power_get(dev_priv, power_domain);
312 mutex_lock(&dev_priv->pps_mutex);
315 static void pps_unlock(struct intel_dp *intel_dp)
317 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
318 struct intel_encoder *encoder = &intel_dig_port->base;
319 struct drm_device *dev = encoder->base.dev;
320 struct drm_i915_private *dev_priv = dev->dev_private;
321 enum intel_display_power_domain power_domain;
323 mutex_unlock(&dev_priv->pps_mutex);
325 power_domain = intel_display_port_power_domain(encoder);
326 intel_display_power_put(dev_priv, power_domain);
330 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
333 struct drm_device *dev = intel_dig_port->base.base.dev;
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 enum pipe pipe = intel_dp->pps_pipe;
339 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
340 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
341 pipe_name(pipe), port_name(intel_dig_port->port)))
344 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
345 pipe_name(pipe), port_name(intel_dig_port->port));
347 /* Preserve the BIOS-computed detected bit. This is
348 * supposed to be read-only.
350 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
351 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
352 DP |= DP_PORT_WIDTH(1);
353 DP |= DP_LINK_TRAIN_PAT_1;
355 if (IS_CHERRYVIEW(dev))
356 DP |= DP_PIPE_SELECT_CHV(pipe);
357 else if (pipe == PIPE_B)
358 DP |= DP_PIPEB_SELECT;
360 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
363 * The DPLL for the pipe must be enabled for this to work.
364 * So enable temporarily it if it's not already enabled.
367 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
368 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
371 * Similar magic as in intel_dp_enable_port().
372 * We _must_ do this port enable + disable trick
373 * to make this power seqeuencer lock onto the port.
374 * Otherwise even VDD force bit won't work.
376 I915_WRITE(intel_dp->output_reg, DP);
377 POSTING_READ(intel_dp->output_reg);
379 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
380 POSTING_READ(intel_dp->output_reg);
382 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
386 vlv_force_pll_off(dev, pipe);
390 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
392 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
393 struct drm_device *dev = intel_dig_port->base.base.dev;
394 struct drm_i915_private *dev_priv = dev->dev_private;
395 struct intel_encoder *encoder;
396 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
399 lockdep_assert_held(&dev_priv->pps_mutex);
401 /* We should never land here with regular DP ports */
402 WARN_ON(!is_edp(intel_dp));
404 if (intel_dp->pps_pipe != INVALID_PIPE)
405 return intel_dp->pps_pipe;
408 * We don't have power sequencer currently.
409 * Pick one that's not used by other ports.
411 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
413 struct intel_dp *tmp;
415 if (encoder->type != INTEL_OUTPUT_EDP)
418 tmp = enc_to_intel_dp(&encoder->base);
420 if (tmp->pps_pipe != INVALID_PIPE)
421 pipes &= ~(1 << tmp->pps_pipe);
425 * Didn't find one. This should not happen since there
426 * are two power sequencers and up to two eDP ports.
428 if (WARN_ON(pipes == 0))
431 pipe = ffs(pipes) - 1;
433 vlv_steal_power_sequencer(dev, pipe);
434 intel_dp->pps_pipe = pipe;
436 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
437 pipe_name(intel_dp->pps_pipe),
438 port_name(intel_dig_port->port));
440 /* init power sequencer on this pipe and port */
441 intel_dp_init_panel_power_sequencer(dev, intel_dp);
442 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
445 * Even vdd force doesn't work until we've made
446 * the power sequencer lock in on the port.
448 vlv_power_sequencer_kick(intel_dp);
450 return intel_dp->pps_pipe;
453 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
456 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
459 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
462 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
465 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
468 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
475 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477 vlv_pipe_check pipe_check)
481 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
482 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
483 PANEL_PORT_SELECT_MASK;
485 if (port_sel != PANEL_PORT_SELECT_VLV(port))
488 if (!pipe_check(dev_priv, pipe))
498 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
500 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
501 struct drm_device *dev = intel_dig_port->base.base.dev;
502 struct drm_i915_private *dev_priv = dev->dev_private;
503 enum port port = intel_dig_port->port;
505 lockdep_assert_held(&dev_priv->pps_mutex);
507 /* try to find a pipe with this port selected */
508 /* first pick one where the panel is on */
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
511 /* didn't find one? pick one where vdd is on */
512 if (intel_dp->pps_pipe == INVALID_PIPE)
513 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
514 vlv_pipe_has_vdd_on);
515 /* didn't find one? pick one with just the correct port */
516 if (intel_dp->pps_pipe == INVALID_PIPE)
517 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
520 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
521 if (intel_dp->pps_pipe == INVALID_PIPE) {
522 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
527 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
528 port_name(port), pipe_name(intel_dp->pps_pipe));
530 intel_dp_init_panel_power_sequencer(dev, intel_dp);
531 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
534 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
536 struct drm_device *dev = dev_priv->dev;
537 struct intel_encoder *encoder;
539 if (WARN_ON(!IS_VALLEYVIEW(dev)))
543 * We can't grab pps_mutex here due to deadlock with power_domain
544 * mutex when power_domain functions are called while holding pps_mutex.
545 * That also means that in order to use pps_pipe the code needs to
546 * hold both a power domain reference and pps_mutex, and the power domain
547 * reference get/put must be done while _not_ holding pps_mutex.
548 * pps_{lock,unlock}() do these steps in the correct order, so one
549 * should use them always.
552 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
553 struct intel_dp *intel_dp;
555 if (encoder->type != INTEL_OUTPUT_EDP)
558 intel_dp = enc_to_intel_dp(&encoder->base);
559 intel_dp->pps_pipe = INVALID_PIPE;
563 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568 return BXT_PP_CONTROL(0);
569 else if (HAS_PCH_SPLIT(dev))
570 return PCH_PP_CONTROL;
572 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
575 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
577 struct drm_device *dev = intel_dp_to_dev(intel_dp);
580 return BXT_PP_STATUS(0);
581 else if (HAS_PCH_SPLIT(dev))
582 return PCH_PP_STATUS;
584 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
587 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
588 This function only applicable when panel PM state is not to be tracked */
589 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
592 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
595 struct drm_i915_private *dev_priv = dev->dev_private;
597 u32 pp_ctrl_reg, pp_div_reg;
599 if (!is_edp(intel_dp) || code != SYS_RESTART)
604 if (IS_VALLEYVIEW(dev)) {
605 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
607 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
608 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
609 pp_div = I915_READ(pp_div_reg);
610 pp_div &= PP_REFERENCE_DIVIDER_MASK;
612 /* 0x1F write to PP_DIV_REG sets max cycle delay */
613 I915_WRITE(pp_div_reg, pp_div | 0x1F);
614 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
615 msleep(intel_dp->panel_power_cycle_delay);
618 pps_unlock(intel_dp);
623 static bool edp_have_panel_power(struct intel_dp *intel_dp)
625 struct drm_device *dev = intel_dp_to_dev(intel_dp);
626 struct drm_i915_private *dev_priv = dev->dev_private;
628 lockdep_assert_held(&dev_priv->pps_mutex);
630 if (IS_VALLEYVIEW(dev) &&
631 intel_dp->pps_pipe == INVALID_PIPE)
634 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
637 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
639 struct drm_device *dev = intel_dp_to_dev(intel_dp);
640 struct drm_i915_private *dev_priv = dev->dev_private;
642 lockdep_assert_held(&dev_priv->pps_mutex);
644 if (IS_VALLEYVIEW(dev) &&
645 intel_dp->pps_pipe == INVALID_PIPE)
648 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
652 intel_dp_check_edp(struct intel_dp *intel_dp)
654 struct drm_device *dev = intel_dp_to_dev(intel_dp);
655 struct drm_i915_private *dev_priv = dev->dev_private;
657 if (!is_edp(intel_dp))
660 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
661 WARN(1, "eDP powered off while attempting aux channel communication.\n");
662 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
663 I915_READ(_pp_stat_reg(intel_dp)),
664 I915_READ(_pp_ctrl_reg(intel_dp)));
669 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
672 struct drm_device *dev = intel_dig_port->base.base.dev;
673 struct drm_i915_private *dev_priv = dev->dev_private;
674 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
678 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
680 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
681 msecs_to_jiffies_timeout(10));
683 done = wait_for_atomic(C, 10) == 0;
685 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
692 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
698 * The clock divider is based off the hrawclk, and would like to run at
699 * 2MHz. So, take the hrawclk value and divide by 2 and use that
701 return index ? 0 : intel_hrawclk(dev) / 2;
704 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
706 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707 struct drm_device *dev = intel_dig_port->base.base.dev;
708 struct drm_i915_private *dev_priv = dev->dev_private;
713 if (intel_dig_port->port == PORT_A) {
714 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
717 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
723 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
724 struct drm_device *dev = intel_dig_port->base.base.dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
727 if (intel_dig_port->port == PORT_A) {
730 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
731 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
732 /* Workaround for non-ULT HSW */
739 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
743 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
745 return index ? 0 : 100;
748 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
751 * SKL doesn't need us to program the AUX clock divider (Hardware will
752 * derive the clock from CDCLK automatically). We still implement the
753 * get_aux_clock_divider vfunc to plug-in into the existing code.
755 return index ? 0 : 1;
758 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
761 uint32_t aux_clock_divider)
763 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
764 struct drm_device *dev = intel_dig_port->base.base.dev;
765 uint32_t precharge, timeout;
772 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
773 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
775 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
777 return DP_AUX_CH_CTL_SEND_BUSY |
779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
782 DP_AUX_CH_CTL_RECEIVE_ERROR |
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
785 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
788 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
793 return DP_AUX_CH_CTL_SEND_BUSY |
795 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR |
797 DP_AUX_CH_CTL_TIME_OUT_1600us |
798 DP_AUX_CH_CTL_RECEIVE_ERROR |
799 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
804 intel_dp_aux_ch(struct intel_dp *intel_dp,
805 const uint8_t *send, int send_bytes,
806 uint8_t *recv, int recv_size)
808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
809 struct drm_device *dev = intel_dig_port->base.base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
812 uint32_t ch_data = ch_ctl + 4;
813 uint32_t aux_clock_divider;
814 int i, ret, recv_bytes;
817 bool has_aux_irq = HAS_AUX_IRQ(dev);
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
828 vdd = edp_panel_vdd_on(intel_dp);
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
834 pm_qos_update_request(&dev_priv->pm_qos, 0);
836 intel_dp_check_edp(intel_dp);
838 intel_aux_display_runtime_get(dev_priv);
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
842 status = I915_READ_NOTRACE(ch_ctl);
843 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
849 static u32 last_status = -1;
850 const u32 status = I915_READ(ch_ctl);
852 if (status != last_status) {
853 WARN(1, "dp_aux_ch not started status 0x%08x\n",
855 last_status = status;
862 /* Only 5 data registers! */
863 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
868 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
869 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
874 /* Must try at least 3 times according to DP spec */
875 for (try = 0; try < 5; try++) {
876 /* Load the send data into the aux channel data registers */
877 for (i = 0; i < send_bytes; i += 4)
878 I915_WRITE(ch_data + i,
879 intel_dp_pack_aux(send + i,
882 /* Send the command and wait for it to complete */
883 I915_WRITE(ch_ctl, send_ctl);
885 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
887 /* Clear done status and any errors */
891 DP_AUX_CH_CTL_TIME_OUT_ERROR |
892 DP_AUX_CH_CTL_RECEIVE_ERROR);
894 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
897 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
898 * 400us delay required for errors and timeouts
899 * Timeout errors from the HW already meet this
900 * requirement so skip to next iteration
902 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
903 usleep_range(400, 500);
906 if (status & DP_AUX_CH_CTL_DONE)
911 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
912 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
918 /* Check for timeout or receive error.
919 * Timeouts occur when the sink is not connected
921 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
922 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
927 /* Timeouts occur when the device isn't connected, so they're
928 * "normal" -- don't fill the kernel log with these */
929 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
930 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
935 /* Unload any bytes sent back from the other side */
936 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
937 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
938 if (recv_bytes > recv_size)
939 recv_bytes = recv_size;
941 for (i = 0; i < recv_bytes; i += 4)
942 intel_dp_unpack_aux(I915_READ(ch_data + i),
943 recv + i, recv_bytes - i);
947 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
948 intel_aux_display_runtime_put(dev_priv);
951 edp_panel_vdd_off(intel_dp, false);
953 pps_unlock(intel_dp);
958 #define BARE_ADDRESS_SIZE 3
959 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
961 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
963 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
964 uint8_t txbuf[20], rxbuf[20];
965 size_t txsize, rxsize;
968 txbuf[0] = (msg->request << 4) |
969 ((msg->address >> 16) & 0xf);
970 txbuf[1] = (msg->address >> 8) & 0xff;
971 txbuf[2] = msg->address & 0xff;
972 txbuf[3] = msg->size - 1;
974 switch (msg->request & ~DP_AUX_I2C_MOT) {
975 case DP_AUX_NATIVE_WRITE:
976 case DP_AUX_I2C_WRITE:
977 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
978 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
979 rxsize = 2; /* 0 or 1 data bytes */
981 if (WARN_ON(txsize > 20))
984 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
986 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
988 msg->reply = rxbuf[0] >> 4;
991 /* Number of bytes written in a short write. */
992 ret = clamp_t(int, rxbuf[1], 0, msg->size);
994 /* Return payload size. */
1000 case DP_AUX_NATIVE_READ:
1001 case DP_AUX_I2C_READ:
1002 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1003 rxsize = msg->size + 1;
1005 if (WARN_ON(rxsize > 20))
1008 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1010 msg->reply = rxbuf[0] >> 4;
1012 * Assume happy day, and copy the data. The caller is
1013 * expected to check msg->reply before touching it.
1015 * Return payload size.
1018 memcpy(msg->buffer, rxbuf + 1, ret);
1031 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1033 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1034 struct drm_i915_private *dev_priv = dev->dev_private;
1035 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1036 enum port port = intel_dig_port->port;
1037 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1038 const char *name = NULL;
1039 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1042 /* On SKL we don't have Aux for port E so we rely on VBT to set
1043 * a proper alternate aux channel.
1045 if (IS_SKYLAKE(dev) && port == PORT_E) {
1046 switch (info->alternate_aux_channel) {
1048 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1051 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1054 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1058 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1064 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1068 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1072 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1076 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1080 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1088 * The AUX_CTL register is usually DP_CTL + 0x10.
1090 * On Haswell and Broadwell though:
1091 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1092 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1094 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1096 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1097 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1099 intel_dp->aux.name = name;
1100 intel_dp->aux.dev = dev->dev;
1101 intel_dp->aux.transfer = intel_dp_aux_transfer;
1103 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1104 connector->base.kdev->kobj.name);
1106 ret = drm_dp_aux_register(&intel_dp->aux);
1108 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1113 ret = sysfs_create_link(&connector->base.kdev->kobj,
1114 &intel_dp->aux.ddc.dev.kobj,
1115 intel_dp->aux.ddc.dev.kobj.name);
1117 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1118 drm_dp_aux_unregister(&intel_dp->aux);
1123 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1125 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1127 if (!intel_connector->mst_port)
1128 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1129 intel_dp->aux.ddc.dev.kobj.name);
1130 intel_connector_unregister(intel_connector);
1134 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1138 memset(&pipe_config->dpll_hw_state, 0,
1139 sizeof(pipe_config->dpll_hw_state));
1141 pipe_config->ddi_pll_sel = SKL_DPLL0;
1142 pipe_config->dpll_hw_state.cfgcr1 = 0;
1143 pipe_config->dpll_hw_state.cfgcr2 = 0;
1145 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1146 switch (pipe_config->port_clock / 2) {
1148 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1152 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1156 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1160 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1163 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1164 results in CDCLK change. Need to handle the change of CDCLK by
1165 disabling pipes and re-enabling them */
1167 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1171 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1176 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1180 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1182 memset(&pipe_config->dpll_hw_state, 0,
1183 sizeof(pipe_config->dpll_hw_state));
1185 switch (pipe_config->port_clock / 2) {
1187 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1190 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1193 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1199 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1201 if (intel_dp->num_sink_rates) {
1202 *sink_rates = intel_dp->sink_rates;
1203 return intel_dp->num_sink_rates;
1206 *sink_rates = default_rates;
1208 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1211 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1213 /* WaDisableHBR2:skl */
1214 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1217 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1218 (INTEL_INFO(dev)->gen >= 9))
1225 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1229 if (IS_BROXTON(dev)) {
1230 *source_rates = bxt_rates;
1231 size = ARRAY_SIZE(bxt_rates);
1232 } else if (IS_SKYLAKE(dev)) {
1233 *source_rates = skl_rates;
1234 size = ARRAY_SIZE(skl_rates);
1236 *source_rates = default_rates;
1237 size = ARRAY_SIZE(default_rates);
1240 /* This depends on the fact that 5.4 is last value in the array */
1241 if (!intel_dp_source_supports_hbr2(dev))
1248 intel_dp_set_clock(struct intel_encoder *encoder,
1249 struct intel_crtc_state *pipe_config)
1251 struct drm_device *dev = encoder->base.dev;
1252 const struct dp_link_dpll *divisor = NULL;
1256 divisor = gen4_dpll;
1257 count = ARRAY_SIZE(gen4_dpll);
1258 } else if (HAS_PCH_SPLIT(dev)) {
1260 count = ARRAY_SIZE(pch_dpll);
1261 } else if (IS_CHERRYVIEW(dev)) {
1263 count = ARRAY_SIZE(chv_dpll);
1264 } else if (IS_VALLEYVIEW(dev)) {
1266 count = ARRAY_SIZE(vlv_dpll);
1269 if (divisor && count) {
1270 for (i = 0; i < count; i++) {
1271 if (pipe_config->port_clock == divisor[i].clock) {
1272 pipe_config->dpll = divisor[i].dpll;
1273 pipe_config->clock_set = true;
1280 static int intersect_rates(const int *source_rates, int source_len,
1281 const int *sink_rates, int sink_len,
1284 int i = 0, j = 0, k = 0;
1286 while (i < source_len && j < sink_len) {
1287 if (source_rates[i] == sink_rates[j]) {
1288 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1290 common_rates[k] = source_rates[i];
1294 } else if (source_rates[i] < sink_rates[j]) {
1303 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1307 const int *source_rates, *sink_rates;
1308 int source_len, sink_len;
1310 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1311 source_len = intel_dp_source_rates(dev, &source_rates);
1313 return intersect_rates(source_rates, source_len,
1314 sink_rates, sink_len,
1318 static void snprintf_int_array(char *str, size_t len,
1319 const int *array, int nelem)
1325 for (i = 0; i < nelem; i++) {
1326 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1334 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1336 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1337 const int *source_rates, *sink_rates;
1338 int source_len, sink_len, common_len;
1339 int common_rates[DP_MAX_SUPPORTED_RATES];
1340 char str[128]; /* FIXME: too big for stack? */
1342 if ((drm_debug & DRM_UT_KMS) == 0)
1345 source_len = intel_dp_source_rates(dev, &source_rates);
1346 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1347 DRM_DEBUG_KMS("source rates: %s\n", str);
1349 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1350 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1351 DRM_DEBUG_KMS("sink rates: %s\n", str);
1353 common_len = intel_dp_common_rates(intel_dp, common_rates);
1354 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1355 DRM_DEBUG_KMS("common rates: %s\n", str);
1358 static int rate_to_index(int find, const int *rates)
1362 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1363 if (find == rates[i])
1370 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1372 int rates[DP_MAX_SUPPORTED_RATES] = {};
1375 len = intel_dp_common_rates(intel_dp, rates);
1376 if (WARN_ON(len <= 0))
1379 return rates[rate_to_index(0, rates) - 1];
1382 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1384 return rate_to_index(rate, intel_dp->sink_rates);
1388 intel_dp_compute_config(struct intel_encoder *encoder,
1389 struct intel_crtc_state *pipe_config)
1391 struct drm_device *dev = encoder->base.dev;
1392 struct drm_i915_private *dev_priv = dev->dev_private;
1393 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1394 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1395 enum port port = dp_to_dig_port(intel_dp)->port;
1396 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1397 struct intel_connector *intel_connector = intel_dp->attached_connector;
1398 int lane_count, clock;
1399 int min_lane_count = 1;
1400 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1401 /* Conveniently, the link BW constants become indices with a shift...*/
1405 int link_avail, link_clock;
1406 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1409 common_len = intel_dp_common_rates(intel_dp, common_rates);
1411 /* No common link rates between source and sink */
1412 WARN_ON(common_len <= 0);
1414 max_clock = common_len - 1;
1416 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1417 pipe_config->has_pch_encoder = true;
1419 pipe_config->has_dp_encoder = true;
1420 pipe_config->has_drrs = false;
1421 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1423 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1424 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1427 if (INTEL_INFO(dev)->gen >= 9) {
1429 ret = skl_update_scaler_crtc(pipe_config);
1434 if (!HAS_PCH_SPLIT(dev))
1435 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1436 intel_connector->panel.fitting_mode);
1438 intel_pch_panel_fitting(intel_crtc, pipe_config,
1439 intel_connector->panel.fitting_mode);
1442 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1445 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1446 "max bw %d pixel clock %iKHz\n",
1447 max_lane_count, common_rates[max_clock],
1448 adjusted_mode->crtc_clock);
1450 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1451 * bpc in between. */
1452 bpp = pipe_config->pipe_bpp;
1453 if (is_edp(intel_dp)) {
1455 /* Get bpp from vbt only for panels that dont have bpp in edid */
1456 if (intel_connector->base.display_info.bpc == 0 &&
1457 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1458 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1459 dev_priv->vbt.edp_bpp);
1460 bpp = dev_priv->vbt.edp_bpp;
1464 * Use the maximum clock and number of lanes the eDP panel
1465 * advertizes being capable of. The panels are generally
1466 * designed to support only a single clock and lane
1467 * configuration, and typically these values correspond to the
1468 * native resolution of the panel.
1470 min_lane_count = max_lane_count;
1471 min_clock = max_clock;
1474 for (; bpp >= 6*3; bpp -= 2*3) {
1475 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1478 for (clock = min_clock; clock <= max_clock; clock++) {
1479 for (lane_count = min_lane_count;
1480 lane_count <= max_lane_count;
1483 link_clock = common_rates[clock];
1484 link_avail = intel_dp_max_data_rate(link_clock,
1487 if (mode_rate <= link_avail) {
1497 if (intel_dp->color_range_auto) {
1500 * CEA-861-E - 5.1 Default Encoding Parameters
1501 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1503 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1504 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1506 intel_dp->color_range = 0;
1509 if (intel_dp->color_range)
1510 pipe_config->limited_color_range = true;
1512 intel_dp->lane_count = lane_count;
1514 if (intel_dp->num_sink_rates) {
1515 intel_dp->link_bw = 0;
1516 intel_dp->rate_select =
1517 intel_dp_rate_select(intel_dp, common_rates[clock]);
1520 drm_dp_link_rate_to_bw_code(common_rates[clock]);
1521 intel_dp->rate_select = 0;
1524 pipe_config->pipe_bpp = bpp;
1525 pipe_config->port_clock = common_rates[clock];
1527 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1528 intel_dp->link_bw, intel_dp->lane_count,
1529 pipe_config->port_clock, bpp);
1530 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1531 mode_rate, link_avail);
1533 intel_link_compute_m_n(bpp, lane_count,
1534 adjusted_mode->crtc_clock,
1535 pipe_config->port_clock,
1536 &pipe_config->dp_m_n);
1538 if (intel_connector->panel.downclock_mode != NULL &&
1539 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1540 pipe_config->has_drrs = true;
1541 intel_link_compute_m_n(bpp, lane_count,
1542 intel_connector->panel.downclock_mode->clock,
1543 pipe_config->port_clock,
1544 &pipe_config->dp_m2_n2);
1547 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1548 skl_edp_set_pll_config(pipe_config);
1549 else if (IS_BROXTON(dev))
1550 /* handled in ddi */;
1551 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1552 hsw_dp_set_ddi_pll_sel(pipe_config);
1554 intel_dp_set_clock(encoder, pipe_config);
1559 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1561 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1562 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1563 struct drm_device *dev = crtc->base.dev;
1564 struct drm_i915_private *dev_priv = dev->dev_private;
1567 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1568 crtc->config->port_clock);
1569 dpa_ctl = I915_READ(DP_A);
1570 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1572 if (crtc->config->port_clock == 162000) {
1573 /* For a long time we've carried around a ILK-DevA w/a for the
1574 * 160MHz clock. If we're really unlucky, it's still required.
1576 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1577 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1578 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1580 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1581 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1584 I915_WRITE(DP_A, dpa_ctl);
1590 static void intel_dp_prepare(struct intel_encoder *encoder)
1592 struct drm_device *dev = encoder->base.dev;
1593 struct drm_i915_private *dev_priv = dev->dev_private;
1594 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1595 enum port port = dp_to_dig_port(intel_dp)->port;
1596 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1597 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1600 * There are four kinds of DP registers:
1607 * IBX PCH and CPU are the same for almost everything,
1608 * except that the CPU DP PLL is configured in this
1611 * CPT PCH is quite different, having many bits moved
1612 * to the TRANS_DP_CTL register instead. That
1613 * configuration happens (oddly) in ironlake_pch_enable
1616 /* Preserve the BIOS-computed detected bit. This is
1617 * supposed to be read-only.
1619 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1621 /* Handle DP bits in common between all three register formats */
1622 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1623 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1625 if (crtc->config->has_audio)
1626 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1628 /* Split out the IBX/CPU vs CPT settings */
1630 if (IS_GEN7(dev) && port == PORT_A) {
1631 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1632 intel_dp->DP |= DP_SYNC_HS_HIGH;
1633 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1634 intel_dp->DP |= DP_SYNC_VS_HIGH;
1635 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1637 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1638 intel_dp->DP |= DP_ENHANCED_FRAMING;
1640 intel_dp->DP |= crtc->pipe << 29;
1641 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1644 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1646 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1647 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1648 trans_dp |= TRANS_DP_ENH_FRAMING;
1650 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1651 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1653 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1654 intel_dp->DP |= intel_dp->color_range;
1656 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1657 intel_dp->DP |= DP_SYNC_HS_HIGH;
1658 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1659 intel_dp->DP |= DP_SYNC_VS_HIGH;
1660 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1662 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1663 intel_dp->DP |= DP_ENHANCED_FRAMING;
1665 if (IS_CHERRYVIEW(dev))
1666 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1667 else if (crtc->pipe == PIPE_B)
1668 intel_dp->DP |= DP_PIPEB_SELECT;
1672 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1673 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1675 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1676 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1678 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1679 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1681 static void wait_panel_status(struct intel_dp *intel_dp,
1685 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1686 struct drm_i915_private *dev_priv = dev->dev_private;
1687 u32 pp_stat_reg, pp_ctrl_reg;
1689 lockdep_assert_held(&dev_priv->pps_mutex);
1691 pp_stat_reg = _pp_stat_reg(intel_dp);
1692 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1694 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
1699 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1700 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1701 I915_READ(pp_stat_reg),
1702 I915_READ(pp_ctrl_reg));
1705 DRM_DEBUG_KMS("Wait complete\n");
1708 static void wait_panel_on(struct intel_dp *intel_dp)
1710 DRM_DEBUG_KMS("Wait for panel power on\n");
1711 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1714 static void wait_panel_off(struct intel_dp *intel_dp)
1716 DRM_DEBUG_KMS("Wait for panel power off time\n");
1717 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1720 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1722 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1724 /* When we disable the VDD override bit last we have to do the manual
1726 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1727 intel_dp->panel_power_cycle_delay);
1729 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1732 static void wait_backlight_on(struct intel_dp *intel_dp)
1734 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1735 intel_dp->backlight_on_delay);
1738 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1740 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1741 intel_dp->backlight_off_delay);
1744 /* Read the current pp_control value, unlocking the register if it
1748 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1750 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1751 struct drm_i915_private *dev_priv = dev->dev_private;
1754 lockdep_assert_held(&dev_priv->pps_mutex);
1756 control = I915_READ(_pp_ctrl_reg(intel_dp));
1757 if (!IS_BROXTON(dev)) {
1758 control &= ~PANEL_UNLOCK_MASK;
1759 control |= PANEL_UNLOCK_REGS;
1765 * Must be paired with edp_panel_vdd_off().
1766 * Must hold pps_mutex around the whole on/off sequence.
1767 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1769 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1771 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1772 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1773 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1774 struct drm_i915_private *dev_priv = dev->dev_private;
1775 enum intel_display_power_domain power_domain;
1777 u32 pp_stat_reg, pp_ctrl_reg;
1778 bool need_to_disable = !intel_dp->want_panel_vdd;
1780 lockdep_assert_held(&dev_priv->pps_mutex);
1782 if (!is_edp(intel_dp))
1785 cancel_delayed_work(&intel_dp->panel_vdd_work);
1786 intel_dp->want_panel_vdd = true;
1788 if (edp_have_panel_vdd(intel_dp))
1789 return need_to_disable;
1791 power_domain = intel_display_port_power_domain(intel_encoder);
1792 intel_display_power_get(dev_priv, power_domain);
1794 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1795 port_name(intel_dig_port->port));
1797 if (!edp_have_panel_power(intel_dp))
1798 wait_panel_power_cycle(intel_dp);
1800 pp = ironlake_get_pp_control(intel_dp);
1801 pp |= EDP_FORCE_VDD;
1803 pp_stat_reg = _pp_stat_reg(intel_dp);
1804 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1806 I915_WRITE(pp_ctrl_reg, pp);
1807 POSTING_READ(pp_ctrl_reg);
1808 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1809 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1811 * If the panel wasn't on, delay before accessing aux channel
1813 if (!edp_have_panel_power(intel_dp)) {
1814 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1815 port_name(intel_dig_port->port));
1816 msleep(intel_dp->panel_power_up_delay);
1819 return need_to_disable;
1823 * Must be paired with intel_edp_panel_vdd_off() or
1824 * intel_edp_panel_off().
1825 * Nested calls to these functions are not allowed since
1826 * we drop the lock. Caller must use some higher level
1827 * locking to prevent nested calls from other threads.
1829 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1833 if (!is_edp(intel_dp))
1837 vdd = edp_panel_vdd_on(intel_dp);
1838 pps_unlock(intel_dp);
1840 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1841 port_name(dp_to_dig_port(intel_dp)->port));
1844 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1846 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1847 struct drm_i915_private *dev_priv = dev->dev_private;
1848 struct intel_digital_port *intel_dig_port =
1849 dp_to_dig_port(intel_dp);
1850 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1851 enum intel_display_power_domain power_domain;
1853 u32 pp_stat_reg, pp_ctrl_reg;
1855 lockdep_assert_held(&dev_priv->pps_mutex);
1857 WARN_ON(intel_dp->want_panel_vdd);
1859 if (!edp_have_panel_vdd(intel_dp))
1862 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1863 port_name(intel_dig_port->port));
1865 pp = ironlake_get_pp_control(intel_dp);
1866 pp &= ~EDP_FORCE_VDD;
1868 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1869 pp_stat_reg = _pp_stat_reg(intel_dp);
1871 I915_WRITE(pp_ctrl_reg, pp);
1872 POSTING_READ(pp_ctrl_reg);
1874 /* Make sure sequencer is idle before allowing subsequent activity */
1875 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1876 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1878 if ((pp & POWER_TARGET_ON) == 0)
1879 intel_dp->last_power_cycle = jiffies;
1881 power_domain = intel_display_port_power_domain(intel_encoder);
1882 intel_display_power_put(dev_priv, power_domain);
1885 static void edp_panel_vdd_work(struct work_struct *__work)
1887 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1888 struct intel_dp, panel_vdd_work);
1891 if (!intel_dp->want_panel_vdd)
1892 edp_panel_vdd_off_sync(intel_dp);
1893 pps_unlock(intel_dp);
1896 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1898 unsigned long delay;
1901 * Queue the timer to fire a long time from now (relative to the power
1902 * down delay) to keep the panel power up across a sequence of
1905 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1906 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1910 * Must be paired with edp_panel_vdd_on().
1911 * Must hold pps_mutex around the whole on/off sequence.
1912 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1914 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1916 struct drm_i915_private *dev_priv =
1917 intel_dp_to_dev(intel_dp)->dev_private;
1919 lockdep_assert_held(&dev_priv->pps_mutex);
1921 if (!is_edp(intel_dp))
1924 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1925 port_name(dp_to_dig_port(intel_dp)->port));
1927 intel_dp->want_panel_vdd = false;
1930 edp_panel_vdd_off_sync(intel_dp);
1932 edp_panel_vdd_schedule_off(intel_dp);
1935 static void edp_panel_on(struct intel_dp *intel_dp)
1937 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1938 struct drm_i915_private *dev_priv = dev->dev_private;
1942 lockdep_assert_held(&dev_priv->pps_mutex);
1944 if (!is_edp(intel_dp))
1947 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1948 port_name(dp_to_dig_port(intel_dp)->port));
1950 if (WARN(edp_have_panel_power(intel_dp),
1951 "eDP port %c panel power already on\n",
1952 port_name(dp_to_dig_port(intel_dp)->port)))
1955 wait_panel_power_cycle(intel_dp);
1957 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1958 pp = ironlake_get_pp_control(intel_dp);
1960 /* ILK workaround: disable reset around power sequence */
1961 pp &= ~PANEL_POWER_RESET;
1962 I915_WRITE(pp_ctrl_reg, pp);
1963 POSTING_READ(pp_ctrl_reg);
1966 pp |= POWER_TARGET_ON;
1968 pp |= PANEL_POWER_RESET;
1970 I915_WRITE(pp_ctrl_reg, pp);
1971 POSTING_READ(pp_ctrl_reg);
1973 wait_panel_on(intel_dp);
1974 intel_dp->last_power_on = jiffies;
1977 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
1983 void intel_edp_panel_on(struct intel_dp *intel_dp)
1985 if (!is_edp(intel_dp))
1989 edp_panel_on(intel_dp);
1990 pps_unlock(intel_dp);
1994 static void edp_panel_off(struct intel_dp *intel_dp)
1996 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1997 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1998 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1999 struct drm_i915_private *dev_priv = dev->dev_private;
2000 enum intel_display_power_domain power_domain;
2004 lockdep_assert_held(&dev_priv->pps_mutex);
2006 if (!is_edp(intel_dp))
2009 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2010 port_name(dp_to_dig_port(intel_dp)->port));
2012 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2013 port_name(dp_to_dig_port(intel_dp)->port));
2015 pp = ironlake_get_pp_control(intel_dp);
2016 /* We need to switch off panel power _and_ force vdd, for otherwise some
2017 * panels get very unhappy and cease to work. */
2018 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2021 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2023 intel_dp->want_panel_vdd = false;
2025 I915_WRITE(pp_ctrl_reg, pp);
2026 POSTING_READ(pp_ctrl_reg);
2028 intel_dp->last_power_cycle = jiffies;
2029 wait_panel_off(intel_dp);
2031 /* We got a reference when we enabled the VDD. */
2032 power_domain = intel_display_port_power_domain(intel_encoder);
2033 intel_display_power_put(dev_priv, power_domain);
2036 void intel_edp_panel_off(struct intel_dp *intel_dp)
2038 if (!is_edp(intel_dp))
2042 edp_panel_off(intel_dp);
2043 pps_unlock(intel_dp);
2046 /* Enable backlight in the panel power control. */
2047 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2049 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2050 struct drm_device *dev = intel_dig_port->base.base.dev;
2051 struct drm_i915_private *dev_priv = dev->dev_private;
2056 * If we enable the backlight right away following a panel power
2057 * on, we may see slight flicker as the panel syncs with the eDP
2058 * link. So delay a bit to make sure the image is solid before
2059 * allowing it to appear.
2061 wait_backlight_on(intel_dp);
2065 pp = ironlake_get_pp_control(intel_dp);
2066 pp |= EDP_BLC_ENABLE;
2068 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
2073 pps_unlock(intel_dp);
2076 /* Enable backlight PWM and backlight PP control. */
2077 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2079 if (!is_edp(intel_dp))
2082 DRM_DEBUG_KMS("\n");
2084 intel_panel_enable_backlight(intel_dp->attached_connector);
2085 _intel_edp_backlight_on(intel_dp);
2088 /* Disable backlight in the panel power control. */
2089 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2091 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2096 if (!is_edp(intel_dp))
2101 pp = ironlake_get_pp_control(intel_dp);
2102 pp &= ~EDP_BLC_ENABLE;
2104 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2106 I915_WRITE(pp_ctrl_reg, pp);
2107 POSTING_READ(pp_ctrl_reg);
2109 pps_unlock(intel_dp);
2111 intel_dp->last_backlight_off = jiffies;
2112 edp_wait_backlight_off(intel_dp);
2115 /* Disable backlight PP control and backlight PWM. */
2116 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2118 if (!is_edp(intel_dp))
2121 DRM_DEBUG_KMS("\n");
2123 _intel_edp_backlight_off(intel_dp);
2124 intel_panel_disable_backlight(intel_dp->attached_connector);
2128 * Hook for controlling the panel power control backlight through the bl_power
2129 * sysfs attribute. Take care to handle multiple calls.
2131 static void intel_edp_backlight_power(struct intel_connector *connector,
2134 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2138 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2139 pps_unlock(intel_dp);
2141 if (is_enabled == enable)
2144 DRM_DEBUG_KMS("panel power control backlight %s\n",
2145 enable ? "enable" : "disable");
2148 _intel_edp_backlight_on(intel_dp);
2150 _intel_edp_backlight_off(intel_dp);
2153 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2156 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2157 struct drm_device *dev = crtc->dev;
2158 struct drm_i915_private *dev_priv = dev->dev_private;
2161 assert_pipe_disabled(dev_priv,
2162 to_intel_crtc(crtc)->pipe);
2164 DRM_DEBUG_KMS("\n");
2165 dpa_ctl = I915_READ(DP_A);
2166 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2167 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2169 /* We don't adjust intel_dp->DP while tearing down the link, to
2170 * facilitate link retraining (e.g. after hotplug). Hence clear all
2171 * enable bits here to ensure that we don't enable too much. */
2172 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2173 intel_dp->DP |= DP_PLL_ENABLE;
2174 I915_WRITE(DP_A, intel_dp->DP);
2179 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2181 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2182 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2183 struct drm_device *dev = crtc->dev;
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2187 assert_pipe_disabled(dev_priv,
2188 to_intel_crtc(crtc)->pipe);
2190 dpa_ctl = I915_READ(DP_A);
2191 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2192 "dp pll off, should be on\n");
2193 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2195 /* We can't rely on the value tracked for the DP register in
2196 * intel_dp->DP because link_down must not change that (otherwise link
2197 * re-training will fail. */
2198 dpa_ctl &= ~DP_PLL_ENABLE;
2199 I915_WRITE(DP_A, dpa_ctl);
2204 /* If the sink supports it, try to set the power state appropriately */
2205 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2209 /* Should have a valid DPCD by this point */
2210 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2213 if (mode != DRM_MODE_DPMS_ON) {
2214 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2218 * When turning on, we need to retry for 1ms to give the sink
2221 for (i = 0; i < 3; i++) {
2222 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2231 DRM_DEBUG_KMS("failed to %s sink power state\n",
2232 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2235 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2238 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2239 enum port port = dp_to_dig_port(intel_dp)->port;
2240 struct drm_device *dev = encoder->base.dev;
2241 struct drm_i915_private *dev_priv = dev->dev_private;
2242 enum intel_display_power_domain power_domain;
2245 power_domain = intel_display_port_power_domain(encoder);
2246 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2249 tmp = I915_READ(intel_dp->output_reg);
2251 if (!(tmp & DP_PORT_EN))
2254 if (IS_GEN7(dev) && port == PORT_A) {
2255 *pipe = PORT_TO_PIPE_CPT(tmp);
2256 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2259 for_each_pipe(dev_priv, p) {
2260 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2261 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2267 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2268 intel_dp->output_reg);
2269 } else if (IS_CHERRYVIEW(dev)) {
2270 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2272 *pipe = PORT_TO_PIPE(tmp);
2278 static void intel_dp_get_config(struct intel_encoder *encoder,
2279 struct intel_crtc_state *pipe_config)
2281 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2283 struct drm_device *dev = encoder->base.dev;
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 enum port port = dp_to_dig_port(intel_dp)->port;
2286 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2289 tmp = I915_READ(intel_dp->output_reg);
2291 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2293 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2294 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2295 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2296 flags |= DRM_MODE_FLAG_PHSYNC;
2298 flags |= DRM_MODE_FLAG_NHSYNC;
2300 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2301 flags |= DRM_MODE_FLAG_PVSYNC;
2303 flags |= DRM_MODE_FLAG_NVSYNC;
2305 if (tmp & DP_SYNC_HS_HIGH)
2306 flags |= DRM_MODE_FLAG_PHSYNC;
2308 flags |= DRM_MODE_FLAG_NHSYNC;
2310 if (tmp & DP_SYNC_VS_HIGH)
2311 flags |= DRM_MODE_FLAG_PVSYNC;
2313 flags |= DRM_MODE_FLAG_NVSYNC;
2316 pipe_config->base.adjusted_mode.flags |= flags;
2318 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2319 tmp & DP_COLOR_RANGE_16_235)
2320 pipe_config->limited_color_range = true;
2322 pipe_config->has_dp_encoder = true;
2324 intel_dp_get_m_n(crtc, pipe_config);
2326 if (port == PORT_A) {
2327 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2328 pipe_config->port_clock = 162000;
2330 pipe_config->port_clock = 270000;
2333 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2334 &pipe_config->dp_m_n);
2336 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2337 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2339 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2341 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2342 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2344 * This is a big fat ugly hack.
2346 * Some machines in UEFI boot mode provide us a VBT that has 18
2347 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2348 * unknown we fail to light up. Yet the same BIOS boots up with
2349 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2350 * max, not what it tells us to use.
2352 * Note: This will still be broken if the eDP panel is not lit
2353 * up by the BIOS, and thus we can't get the mode at module
2356 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2357 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2358 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2362 static void intel_disable_dp(struct intel_encoder *encoder)
2364 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2365 struct drm_device *dev = encoder->base.dev;
2366 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2368 if (crtc->config->has_audio)
2369 intel_audio_codec_disable(encoder);
2371 if (HAS_PSR(dev) && !HAS_DDI(dev))
2372 intel_psr_disable(intel_dp);
2374 /* Make sure the panel is off before trying to change the mode. But also
2375 * ensure that we have vdd while we switch off the panel. */
2376 intel_edp_panel_vdd_on(intel_dp);
2377 intel_edp_backlight_off(intel_dp);
2378 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2379 intel_edp_panel_off(intel_dp);
2381 /* disable the port before the pipe on g4x */
2382 if (INTEL_INFO(dev)->gen < 5)
2383 intel_dp_link_down(intel_dp);
2386 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2388 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2389 enum port port = dp_to_dig_port(intel_dp)->port;
2391 intel_dp_link_down(intel_dp);
2393 ironlake_edp_pll_off(intel_dp);
2396 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2398 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2400 intel_dp_link_down(intel_dp);
2403 static void chv_post_disable_dp(struct intel_encoder *encoder)
2405 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2406 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2407 struct drm_device *dev = encoder->base.dev;
2408 struct drm_i915_private *dev_priv = dev->dev_private;
2409 struct intel_crtc *intel_crtc =
2410 to_intel_crtc(encoder->base.crtc);
2411 enum dpio_channel ch = vlv_dport_to_channel(dport);
2412 enum pipe pipe = intel_crtc->pipe;
2415 intel_dp_link_down(intel_dp);
2417 mutex_lock(&dev_priv->sb_lock);
2419 /* Propagate soft reset to data lane reset */
2420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2421 val |= CHV_PCS_REQ_SOFTRESET_EN;
2422 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2424 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2425 val |= CHV_PCS_REQ_SOFTRESET_EN;
2426 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2428 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2429 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2430 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2432 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2433 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2434 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2436 mutex_unlock(&dev_priv->sb_lock);
2440 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2442 uint8_t dp_train_pat)
2444 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2445 struct drm_device *dev = intel_dig_port->base.base.dev;
2446 struct drm_i915_private *dev_priv = dev->dev_private;
2447 enum port port = intel_dig_port->port;
2450 uint32_t temp = I915_READ(DP_TP_CTL(port));
2452 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2453 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2455 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2457 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2458 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2459 case DP_TRAINING_PATTERN_DISABLE:
2460 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2463 case DP_TRAINING_PATTERN_1:
2464 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2466 case DP_TRAINING_PATTERN_2:
2467 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2469 case DP_TRAINING_PATTERN_3:
2470 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2473 I915_WRITE(DP_TP_CTL(port), temp);
2475 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2476 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2477 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2479 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2480 case DP_TRAINING_PATTERN_DISABLE:
2481 *DP |= DP_LINK_TRAIN_OFF_CPT;
2483 case DP_TRAINING_PATTERN_1:
2484 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2486 case DP_TRAINING_PATTERN_2:
2487 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2489 case DP_TRAINING_PATTERN_3:
2490 DRM_ERROR("DP training pattern 3 not supported\n");
2491 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2496 if (IS_CHERRYVIEW(dev))
2497 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2499 *DP &= ~DP_LINK_TRAIN_MASK;
2501 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2502 case DP_TRAINING_PATTERN_DISABLE:
2503 *DP |= DP_LINK_TRAIN_OFF;
2505 case DP_TRAINING_PATTERN_1:
2506 *DP |= DP_LINK_TRAIN_PAT_1;
2508 case DP_TRAINING_PATTERN_2:
2509 *DP |= DP_LINK_TRAIN_PAT_2;
2511 case DP_TRAINING_PATTERN_3:
2512 if (IS_CHERRYVIEW(dev)) {
2513 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2515 DRM_ERROR("DP training pattern 3 not supported\n");
2516 *DP |= DP_LINK_TRAIN_PAT_2;
2523 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2525 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2526 struct drm_i915_private *dev_priv = dev->dev_private;
2528 /* enable with pattern 1 (as per spec) */
2529 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2530 DP_TRAINING_PATTERN_1);
2532 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2533 POSTING_READ(intel_dp->output_reg);
2536 * Magic for VLV/CHV. We _must_ first set up the register
2537 * without actually enabling the port, and then do another
2538 * write to enable the port. Otherwise link training will
2539 * fail when the power sequencer is freshly used for this port.
2541 intel_dp->DP |= DP_PORT_EN;
2543 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2544 POSTING_READ(intel_dp->output_reg);
2547 static void intel_enable_dp(struct intel_encoder *encoder)
2549 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2550 struct drm_device *dev = encoder->base.dev;
2551 struct drm_i915_private *dev_priv = dev->dev_private;
2552 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2553 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2554 unsigned int lane_mask = 0x0;
2556 if (WARN_ON(dp_reg & DP_PORT_EN))
2561 if (IS_VALLEYVIEW(dev))
2562 vlv_init_panel_power_sequencer(intel_dp);
2564 intel_dp_enable_port(intel_dp);
2566 edp_panel_vdd_on(intel_dp);
2567 edp_panel_on(intel_dp);
2568 edp_panel_vdd_off(intel_dp, true);
2570 pps_unlock(intel_dp);
2572 if (IS_VALLEYVIEW(dev))
2573 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2576 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2577 intel_dp_start_link_train(intel_dp);
2578 intel_dp_complete_link_train(intel_dp);
2579 intel_dp_stop_link_train(intel_dp);
2581 if (crtc->config->has_audio) {
2582 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2583 pipe_name(crtc->pipe));
2584 intel_audio_codec_enable(encoder);
2588 static void g4x_enable_dp(struct intel_encoder *encoder)
2590 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2592 intel_enable_dp(encoder);
2593 intel_edp_backlight_on(intel_dp);
2596 static void vlv_enable_dp(struct intel_encoder *encoder)
2598 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2600 intel_edp_backlight_on(intel_dp);
2601 intel_psr_enable(intel_dp);
2604 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2606 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2607 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2609 intel_dp_prepare(encoder);
2611 /* Only ilk+ has port A */
2612 if (dport->port == PORT_A) {
2613 ironlake_set_pll_cpu_edp(intel_dp);
2614 ironlake_edp_pll_on(intel_dp);
2618 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2621 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2622 enum pipe pipe = intel_dp->pps_pipe;
2623 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2625 edp_panel_vdd_off_sync(intel_dp);
2628 * VLV seems to get confused when multiple power seqeuencers
2629 * have the same port selected (even if only one has power/vdd
2630 * enabled). The failure manifests as vlv_wait_port_ready() failing
2631 * CHV on the other hand doesn't seem to mind having the same port
2632 * selected in multiple power seqeuencers, but let's clear the
2633 * port select always when logically disconnecting a power sequencer
2636 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2637 pipe_name(pipe), port_name(intel_dig_port->port));
2638 I915_WRITE(pp_on_reg, 0);
2639 POSTING_READ(pp_on_reg);
2641 intel_dp->pps_pipe = INVALID_PIPE;
2644 static void vlv_steal_power_sequencer(struct drm_device *dev,
2647 struct drm_i915_private *dev_priv = dev->dev_private;
2648 struct intel_encoder *encoder;
2650 lockdep_assert_held(&dev_priv->pps_mutex);
2652 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2655 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2657 struct intel_dp *intel_dp;
2660 if (encoder->type != INTEL_OUTPUT_EDP)
2663 intel_dp = enc_to_intel_dp(&encoder->base);
2664 port = dp_to_dig_port(intel_dp)->port;
2666 if (intel_dp->pps_pipe != pipe)
2669 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2670 pipe_name(pipe), port_name(port));
2672 WARN(encoder->base.crtc,
2673 "stealing pipe %c power sequencer from active eDP port %c\n",
2674 pipe_name(pipe), port_name(port));
2676 /* make sure vdd is off before we steal it */
2677 vlv_detach_power_sequencer(intel_dp);
2681 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2683 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2684 struct intel_encoder *encoder = &intel_dig_port->base;
2685 struct drm_device *dev = encoder->base.dev;
2686 struct drm_i915_private *dev_priv = dev->dev_private;
2687 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2689 lockdep_assert_held(&dev_priv->pps_mutex);
2691 if (!is_edp(intel_dp))
2694 if (intel_dp->pps_pipe == crtc->pipe)
2698 * If another power sequencer was being used on this
2699 * port previously make sure to turn off vdd there while
2700 * we still have control of it.
2702 if (intel_dp->pps_pipe != INVALID_PIPE)
2703 vlv_detach_power_sequencer(intel_dp);
2706 * We may be stealing the power
2707 * sequencer from another port.
2709 vlv_steal_power_sequencer(dev, crtc->pipe);
2711 /* now it's all ours */
2712 intel_dp->pps_pipe = crtc->pipe;
2714 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2715 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2717 /* init power sequencer on this pipe and port */
2718 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2719 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2722 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2724 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2725 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2726 struct drm_device *dev = encoder->base.dev;
2727 struct drm_i915_private *dev_priv = dev->dev_private;
2728 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2729 enum dpio_channel port = vlv_dport_to_channel(dport);
2730 int pipe = intel_crtc->pipe;
2733 mutex_lock(&dev_priv->sb_lock);
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2742 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2743 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2746 mutex_unlock(&dev_priv->sb_lock);
2748 intel_enable_dp(encoder);
2751 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2753 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2754 struct drm_device *dev = encoder->base.dev;
2755 struct drm_i915_private *dev_priv = dev->dev_private;
2756 struct intel_crtc *intel_crtc =
2757 to_intel_crtc(encoder->base.crtc);
2758 enum dpio_channel port = vlv_dport_to_channel(dport);
2759 int pipe = intel_crtc->pipe;
2761 intel_dp_prepare(encoder);
2763 /* Program Tx lane resets to default */
2764 mutex_lock(&dev_priv->sb_lock);
2765 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2766 DPIO_PCS_TX_LANE2_RESET |
2767 DPIO_PCS_TX_LANE1_RESET);
2768 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2769 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2770 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2771 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2772 DPIO_PCS_CLK_SOFT_RESET);
2774 /* Fix up inter-pair skew failure */
2775 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2776 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2777 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2778 mutex_unlock(&dev_priv->sb_lock);
2781 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2783 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2784 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2785 struct drm_device *dev = encoder->base.dev;
2786 struct drm_i915_private *dev_priv = dev->dev_private;
2787 struct intel_crtc *intel_crtc =
2788 to_intel_crtc(encoder->base.crtc);
2789 enum dpio_channel ch = vlv_dport_to_channel(dport);
2790 int pipe = intel_crtc->pipe;
2791 int data, i, stagger;
2794 mutex_lock(&dev_priv->sb_lock);
2796 /* allow hardware to manage TX FIFO reset source */
2797 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2798 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2801 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2802 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2805 /* Deassert soft data lane reset*/
2806 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2807 val |= CHV_PCS_REQ_SOFTRESET_EN;
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2810 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2811 val |= CHV_PCS_REQ_SOFTRESET_EN;
2812 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2814 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2815 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2816 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2818 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2819 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2820 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2822 /* Program Tx lane latency optimal setting*/
2823 for (i = 0; i < 4; i++) {
2824 /* Set the upar bit */
2825 data = (i == 1) ? 0x0 : 0x1;
2826 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2827 data << DPIO_UPAR_SHIFT);
2830 /* Data lane stagger programming */
2831 if (intel_crtc->config->port_clock > 270000)
2833 else if (intel_crtc->config->port_clock > 135000)
2835 else if (intel_crtc->config->port_clock > 67500)
2837 else if (intel_crtc->config->port_clock > 33750)
2842 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2843 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2844 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2846 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2847 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2848 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2851 DPIO_LANESTAGGER_STRAP(stagger) |
2852 DPIO_LANESTAGGER_STRAP_OVRD |
2853 DPIO_TX1_STAGGER_MASK(0x1f) |
2854 DPIO_TX1_STAGGER_MULT(6) |
2855 DPIO_TX2_STAGGER_MULT(0));
2857 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2858 DPIO_LANESTAGGER_STRAP(stagger) |
2859 DPIO_LANESTAGGER_STRAP_OVRD |
2860 DPIO_TX1_STAGGER_MASK(0x1f) |
2861 DPIO_TX1_STAGGER_MULT(7) |
2862 DPIO_TX2_STAGGER_MULT(5));
2864 mutex_unlock(&dev_priv->sb_lock);
2866 intel_enable_dp(encoder);
2869 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2871 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2872 struct drm_device *dev = encoder->base.dev;
2873 struct drm_i915_private *dev_priv = dev->dev_private;
2874 struct intel_crtc *intel_crtc =
2875 to_intel_crtc(encoder->base.crtc);
2876 enum dpio_channel ch = vlv_dport_to_channel(dport);
2877 enum pipe pipe = intel_crtc->pipe;
2880 intel_dp_prepare(encoder);
2882 mutex_lock(&dev_priv->sb_lock);
2884 /* program left/right clock distribution */
2885 if (pipe != PIPE_B) {
2886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2889 val |= CHV_BUFLEFTENA1_FORCE;
2891 val |= CHV_BUFRIGHTENA1_FORCE;
2892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2894 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2895 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2897 val |= CHV_BUFLEFTENA2_FORCE;
2899 val |= CHV_BUFRIGHTENA2_FORCE;
2900 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2903 /* program clock channel usage */
2904 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2905 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2907 val &= ~CHV_PCS_USEDCLKCHANNEL;
2909 val |= CHV_PCS_USEDCLKCHANNEL;
2910 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2912 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2913 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2915 val &= ~CHV_PCS_USEDCLKCHANNEL;
2917 val |= CHV_PCS_USEDCLKCHANNEL;
2918 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2921 * This a a bit weird since generally CL
2922 * matches the pipe, but here we need to
2923 * pick the CL based on the port.
2925 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2927 val &= ~CHV_CMN_USEDCLKCHANNEL;
2929 val |= CHV_CMN_USEDCLKCHANNEL;
2930 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2932 mutex_unlock(&dev_priv->sb_lock);
2936 * Native read with retry for link status and receiver capability reads for
2937 * cases where the sink may still be asleep.
2939 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2940 * supposed to retry 3 times per the spec.
2943 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2944 void *buffer, size_t size)
2950 * Sometime we just get the same incorrect byte repeated
2951 * over the entire buffer. Doing just one throw away read
2952 * initially seems to "solve" it.
2954 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2956 for (i = 0; i < 3; i++) {
2957 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2967 * Fetch AUX CH registers 0x202 - 0x207 which contain
2968 * link status information
2971 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2973 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2976 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2979 /* These are source-specific values. */
2981 intel_dp_voltage_max(struct intel_dp *intel_dp)
2983 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2984 struct drm_i915_private *dev_priv = dev->dev_private;
2985 enum port port = dp_to_dig_port(intel_dp)->port;
2987 if (IS_BROXTON(dev))
2988 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2989 else if (INTEL_INFO(dev)->gen >= 9) {
2990 if (dev_priv->edp_low_vswing && port == PORT_A)
2991 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2992 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2993 } else if (IS_VALLEYVIEW(dev))
2994 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2995 else if (IS_GEN7(dev) && port == PORT_A)
2996 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2997 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2998 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3000 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3004 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3006 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3007 enum port port = dp_to_dig_port(intel_dp)->port;
3009 if (INTEL_INFO(dev)->gen >= 9) {
3010 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3014 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3015 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3016 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3017 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3018 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3020 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3022 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3023 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3025 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3027 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3029 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3032 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3034 } else if (IS_VALLEYVIEW(dev)) {
3035 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3037 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3039 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3041 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3044 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3046 } else if (IS_GEN7(dev) && port == PORT_A) {
3047 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3049 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3051 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3052 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3054 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3057 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3059 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3061 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3063 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3066 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3071 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3073 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3076 struct intel_crtc *intel_crtc =
3077 to_intel_crtc(dport->base.base.crtc);
3078 unsigned long demph_reg_value, preemph_reg_value,
3079 uniqtranscale_reg_value;
3080 uint8_t train_set = intel_dp->train_set[0];
3081 enum dpio_channel port = vlv_dport_to_channel(dport);
3082 int pipe = intel_crtc->pipe;
3084 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3085 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3086 preemph_reg_value = 0x0004000;
3087 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3089 demph_reg_value = 0x2B405555;
3090 uniqtranscale_reg_value = 0x552AB83A;
3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3093 demph_reg_value = 0x2B404040;
3094 uniqtranscale_reg_value = 0x5548B83A;
3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3097 demph_reg_value = 0x2B245555;
3098 uniqtranscale_reg_value = 0x5560B83A;
3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3101 demph_reg_value = 0x2B405555;
3102 uniqtranscale_reg_value = 0x5598DA3A;
3108 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3109 preemph_reg_value = 0x0002000;
3110 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3111 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3112 demph_reg_value = 0x2B404040;
3113 uniqtranscale_reg_value = 0x5552B83A;
3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3116 demph_reg_value = 0x2B404848;
3117 uniqtranscale_reg_value = 0x5580B83A;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3120 demph_reg_value = 0x2B404040;
3121 uniqtranscale_reg_value = 0x55ADDA3A;
3127 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3128 preemph_reg_value = 0x0000000;
3129 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3131 demph_reg_value = 0x2B305555;
3132 uniqtranscale_reg_value = 0x5570B83A;
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3135 demph_reg_value = 0x2B2B4040;
3136 uniqtranscale_reg_value = 0x55ADDA3A;
3142 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3143 preemph_reg_value = 0x0006000;
3144 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3146 demph_reg_value = 0x1B405555;
3147 uniqtranscale_reg_value = 0x55ADDA3A;
3157 mutex_lock(&dev_priv->sb_lock);
3158 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3159 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3160 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3161 uniqtranscale_reg_value);
3162 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3165 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3166 mutex_unlock(&dev_priv->sb_lock);
3171 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3173 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3174 struct drm_i915_private *dev_priv = dev->dev_private;
3175 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3176 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3177 u32 deemph_reg_value, margin_reg_value, val;
3178 uint8_t train_set = intel_dp->train_set[0];
3179 enum dpio_channel ch = vlv_dport_to_channel(dport);
3180 enum pipe pipe = intel_crtc->pipe;
3183 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3184 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3185 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3187 deemph_reg_value = 128;
3188 margin_reg_value = 52;
3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3191 deemph_reg_value = 128;
3192 margin_reg_value = 77;
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3195 deemph_reg_value = 128;
3196 margin_reg_value = 102;
3198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3199 deemph_reg_value = 128;
3200 margin_reg_value = 154;
3201 /* FIXME extra to set for 1200 */
3207 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3210 deemph_reg_value = 85;
3211 margin_reg_value = 78;
3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3214 deemph_reg_value = 85;
3215 margin_reg_value = 116;
3217 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3218 deemph_reg_value = 85;
3219 margin_reg_value = 154;
3225 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3226 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3228 deemph_reg_value = 64;
3229 margin_reg_value = 104;
3231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3232 deemph_reg_value = 64;
3233 margin_reg_value = 154;
3239 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3240 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3242 deemph_reg_value = 43;
3243 margin_reg_value = 154;
3253 mutex_lock(&dev_priv->sb_lock);
3255 /* Clear calc init */
3256 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3257 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3258 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3259 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3260 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3262 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3263 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3264 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3265 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3266 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3268 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3269 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3270 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3274 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3275 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3276 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3278 /* Program swing deemph */
3279 for (i = 0; i < 4; i++) {
3280 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3281 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3282 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3283 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3286 /* Program swing margin */
3287 for (i = 0; i < 4; i++) {
3288 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3289 val &= ~DPIO_SWING_MARGIN000_MASK;
3290 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3291 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3294 /* Disable unique transition scale */
3295 for (i = 0; i < 4; i++) {
3296 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3297 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3298 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3301 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3302 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3303 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3304 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3307 * The document said it needs to set bit 27 for ch0 and bit 26
3308 * for ch1. Might be a typo in the doc.
3309 * For now, for this unique transition scale selection, set bit
3310 * 27 for ch0 and ch1.
3312 for (i = 0; i < 4; i++) {
3313 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3314 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3315 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3318 for (i = 0; i < 4; i++) {
3319 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3320 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3321 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3322 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3326 /* Start swing calculation */
3327 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3328 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3329 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3331 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3332 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3333 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3336 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3337 val |= DPIO_LRC_BYPASS;
3338 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3340 mutex_unlock(&dev_priv->sb_lock);
3346 intel_get_adjust_train(struct intel_dp *intel_dp,
3347 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3352 uint8_t voltage_max;
3353 uint8_t preemph_max;
3355 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3356 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3357 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3365 voltage_max = intel_dp_voltage_max(intel_dp);
3366 if (v >= voltage_max)
3367 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3369 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3370 if (p >= preemph_max)
3371 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3373 for (lane = 0; lane < 4; lane++)
3374 intel_dp->train_set[lane] = v | p;
3378 gen4_signal_levels(uint8_t train_set)
3380 uint32_t signal_levels = 0;
3382 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3385 signal_levels |= DP_VOLTAGE_0_4;
3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3388 signal_levels |= DP_VOLTAGE_0_6;
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3391 signal_levels |= DP_VOLTAGE_0_8;
3393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3394 signal_levels |= DP_VOLTAGE_1_2;
3397 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3398 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3400 signal_levels |= DP_PRE_EMPHASIS_0;
3402 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3403 signal_levels |= DP_PRE_EMPHASIS_3_5;
3405 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3406 signal_levels |= DP_PRE_EMPHASIS_6;
3408 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3409 signal_levels |= DP_PRE_EMPHASIS_9_5;
3412 return signal_levels;
3415 /* Gen6's DP voltage swing and pre-emphasis control */
3417 gen6_edp_signal_levels(uint8_t train_set)
3419 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3420 DP_TRAIN_PRE_EMPHASIS_MASK);
3421 switch (signal_levels) {
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3424 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3426 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3429 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3432 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3437 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3438 "0x%x\n", signal_levels);
3439 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3443 /* Gen7's DP voltage swing and pre-emphasis control */
3445 gen7_edp_signal_levels(uint8_t train_set)
3447 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3448 DP_TRAIN_PRE_EMPHASIS_MASK);
3449 switch (signal_levels) {
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3451 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3452 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3453 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3455 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3458 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3460 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3463 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3465 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3468 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3469 "0x%x\n", signal_levels);
3470 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3474 /* Properly updates "DP" with the correct signal levels. */
3476 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3478 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3479 enum port port = intel_dig_port->port;
3480 struct drm_device *dev = intel_dig_port->base.base.dev;
3481 uint32_t signal_levels, mask = 0;
3482 uint8_t train_set = intel_dp->train_set[0];
3485 signal_levels = ddi_signal_levels(intel_dp);
3487 if (IS_BROXTON(dev))
3490 mask = DDI_BUF_EMP_MASK;
3491 } else if (IS_CHERRYVIEW(dev)) {
3492 signal_levels = chv_signal_levels(intel_dp);
3493 } else if (IS_VALLEYVIEW(dev)) {
3494 signal_levels = vlv_signal_levels(intel_dp);
3495 } else if (IS_GEN7(dev) && port == PORT_A) {
3496 signal_levels = gen7_edp_signal_levels(train_set);
3497 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3498 } else if (IS_GEN6(dev) && port == PORT_A) {
3499 signal_levels = gen6_edp_signal_levels(train_set);
3500 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3502 signal_levels = gen4_signal_levels(train_set);
3503 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3507 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3509 DRM_DEBUG_KMS("Using vswing level %d\n",
3510 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3511 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3512 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3513 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3515 *DP = (*DP & ~mask) | signal_levels;
3519 intel_dp_set_link_train(struct intel_dp *intel_dp,
3521 uint8_t dp_train_pat)
3523 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3524 struct drm_device *dev = intel_dig_port->base.base.dev;
3525 struct drm_i915_private *dev_priv = dev->dev_private;
3526 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3529 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3531 I915_WRITE(intel_dp->output_reg, *DP);
3532 POSTING_READ(intel_dp->output_reg);
3534 buf[0] = dp_train_pat;
3535 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3536 DP_TRAINING_PATTERN_DISABLE) {
3537 /* don't write DP_TRAINING_LANEx_SET on disable */
3540 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3541 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3542 len = intel_dp->lane_count + 1;
3545 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3552 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3553 uint8_t dp_train_pat)
3555 if (!intel_dp->train_set_valid)
3556 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3557 intel_dp_set_signal_levels(intel_dp, DP);
3558 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3562 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3563 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3565 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3566 struct drm_device *dev = intel_dig_port->base.base.dev;
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3570 intel_get_adjust_train(intel_dp, link_status);
3571 intel_dp_set_signal_levels(intel_dp, DP);
3573 I915_WRITE(intel_dp->output_reg, *DP);
3574 POSTING_READ(intel_dp->output_reg);
3576 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3577 intel_dp->train_set, intel_dp->lane_count);
3579 return ret == intel_dp->lane_count;
3582 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3584 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3585 struct drm_device *dev = intel_dig_port->base.base.dev;
3586 struct drm_i915_private *dev_priv = dev->dev_private;
3587 enum port port = intel_dig_port->port;
3593 val = I915_READ(DP_TP_CTL(port));
3594 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3595 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3596 I915_WRITE(DP_TP_CTL(port), val);
3599 * On PORT_A we can have only eDP in SST mode. There the only reason
3600 * we need to set idle transmission mode is to work around a HW issue
3601 * where we enable the pipe while not in idle link-training mode.
3602 * In this case there is requirement to wait for a minimum number of
3603 * idle patterns to be sent.
3608 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3610 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3613 /* Enable corresponding port and start training pattern 1 */
3615 intel_dp_start_link_train(struct intel_dp *intel_dp)
3617 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3618 struct drm_device *dev = encoder->dev;
3621 int voltage_tries, loop_tries;
3622 uint32_t DP = intel_dp->DP;
3623 uint8_t link_config[2];
3626 intel_ddi_prepare_link_retrain(encoder);
3628 /* Write the link configuration data */
3629 link_config[0] = intel_dp->link_bw;
3630 link_config[1] = intel_dp->lane_count;
3631 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3632 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3633 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3634 if (intel_dp->num_sink_rates)
3635 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3636 &intel_dp->rate_select, 1);
3639 link_config[1] = DP_SET_ANSI_8B10B;
3640 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3644 /* clock recovery */
3645 if (!intel_dp_reset_link_train(intel_dp, &DP,
3646 DP_TRAINING_PATTERN_1 |
3647 DP_LINK_SCRAMBLING_DISABLE)) {
3648 DRM_ERROR("failed to enable link training\n");
3656 uint8_t link_status[DP_LINK_STATUS_SIZE];
3658 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3659 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3660 DRM_ERROR("failed to get link status\n");
3664 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3665 DRM_DEBUG_KMS("clock recovery OK\n");
3670 * if we used previously trained voltage and pre-emphasis values
3671 * and we don't get clock recovery, reset link training values
3673 if (intel_dp->train_set_valid) {
3674 DRM_DEBUG_KMS("clock recovery not ok, reset");
3675 /* clear the flag as we are not reusing train set */
3676 intel_dp->train_set_valid = false;
3677 if (!intel_dp_reset_link_train(intel_dp, &DP,
3678 DP_TRAINING_PATTERN_1 |
3679 DP_LINK_SCRAMBLING_DISABLE)) {
3680 DRM_ERROR("failed to enable link training\n");
3686 /* Check to see if we've tried the max voltage */
3687 for (i = 0; i < intel_dp->lane_count; i++)
3688 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3690 if (i == intel_dp->lane_count) {
3692 if (loop_tries == 5) {
3693 DRM_ERROR("too many full retries, give up\n");
3696 intel_dp_reset_link_train(intel_dp, &DP,
3697 DP_TRAINING_PATTERN_1 |
3698 DP_LINK_SCRAMBLING_DISABLE);
3703 /* Check to see if we've tried the same voltage 5 times */
3704 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3706 if (voltage_tries == 5) {
3707 DRM_ERROR("too many voltage retries, give up\n");
3712 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3714 /* Update training set as requested by target */
3715 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3716 DRM_ERROR("failed to update link training\n");
3725 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3727 bool channel_eq = false;
3728 int tries, cr_tries;
3729 uint32_t DP = intel_dp->DP;
3730 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3732 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3733 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3734 training_pattern = DP_TRAINING_PATTERN_3;
3736 /* channel equalization */
3737 if (!intel_dp_set_link_train(intel_dp, &DP,
3739 DP_LINK_SCRAMBLING_DISABLE)) {
3740 DRM_ERROR("failed to start channel equalization\n");
3748 uint8_t link_status[DP_LINK_STATUS_SIZE];
3751 DRM_ERROR("failed to train DP, aborting\n");
3755 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3756 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3757 DRM_ERROR("failed to get link status\n");
3761 /* Make sure clock is still ok */
3762 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3763 intel_dp->train_set_valid = false;
3764 intel_dp_start_link_train(intel_dp);
3765 intel_dp_set_link_train(intel_dp, &DP,
3767 DP_LINK_SCRAMBLING_DISABLE);
3772 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3777 /* Try 5 times, then try clock recovery if that fails */
3779 intel_dp->train_set_valid = false;
3780 intel_dp_start_link_train(intel_dp);
3781 intel_dp_set_link_train(intel_dp, &DP,
3783 DP_LINK_SCRAMBLING_DISABLE);
3789 /* Update training set as requested by target */
3790 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3791 DRM_ERROR("failed to update link training\n");
3797 intel_dp_set_idle_link_train(intel_dp);
3802 intel_dp->train_set_valid = true;
3803 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3807 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3809 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3810 DP_TRAINING_PATTERN_DISABLE);
3814 intel_dp_link_down(struct intel_dp *intel_dp)
3816 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3817 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3818 enum port port = intel_dig_port->port;
3819 struct drm_device *dev = intel_dig_port->base.base.dev;
3820 struct drm_i915_private *dev_priv = dev->dev_private;
3821 uint32_t DP = intel_dp->DP;
3823 if (WARN_ON(HAS_DDI(dev)))
3826 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3829 DRM_DEBUG_KMS("\n");
3831 if ((IS_GEN7(dev) && port == PORT_A) ||
3832 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3833 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3834 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3836 if (IS_CHERRYVIEW(dev))
3837 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3839 DP &= ~DP_LINK_TRAIN_MASK;
3840 DP |= DP_LINK_TRAIN_PAT_IDLE;
3842 I915_WRITE(intel_dp->output_reg, DP);
3843 POSTING_READ(intel_dp->output_reg);
3845 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3846 I915_WRITE(intel_dp->output_reg, DP);
3847 POSTING_READ(intel_dp->output_reg);
3850 * HW workaround for IBX, we need to move the port
3851 * to transcoder A after disabling it to allow the
3852 * matching HDMI port to be enabled on transcoder A.
3854 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3855 /* always enable with pattern 1 (as per spec) */
3856 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3857 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3858 I915_WRITE(intel_dp->output_reg, DP);
3859 POSTING_READ(intel_dp->output_reg);
3862 I915_WRITE(intel_dp->output_reg, DP);
3863 POSTING_READ(intel_dp->output_reg);
3866 msleep(intel_dp->panel_power_down_delay);
3870 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3872 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3873 struct drm_device *dev = dig_port->base.base.dev;
3874 struct drm_i915_private *dev_priv = dev->dev_private;
3877 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3878 sizeof(intel_dp->dpcd)) < 0)
3879 return false; /* aux transfer failed */
3881 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3883 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3884 return false; /* DPCD not present */
3886 /* Check if the panel supports PSR */
3887 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3888 if (is_edp(intel_dp)) {
3889 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3891 sizeof(intel_dp->psr_dpcd));
3892 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3893 dev_priv->psr.sink_support = true;
3894 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3897 if (INTEL_INFO(dev)->gen >= 9 &&
3898 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3899 uint8_t frame_sync_cap;
3901 dev_priv->psr.sink_support = true;
3902 intel_dp_dpcd_read_wake(&intel_dp->aux,
3903 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3904 &frame_sync_cap, 1);
3905 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3906 /* PSR2 needs frame sync as well */
3907 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3908 DRM_DEBUG_KMS("PSR2 %s on sink",
3909 dev_priv->psr.psr2_support ? "supported" : "not supported");
3913 /* Training Pattern 3 support, Intel platforms that support HBR2 alone
3914 * have support for TP3 hence that check is used along with dpcd check
3915 * to ensure TP3 can be enabled.
3916 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
3917 * supported but still not enabled.
3919 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3920 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3921 intel_dp_source_supports_hbr2(dev)) {
3922 intel_dp->use_tps3 = true;
3923 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3925 intel_dp->use_tps3 = false;
3927 /* Intermediate frequency support */
3928 if (is_edp(intel_dp) &&
3929 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3930 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3931 (rev >= 0x03)) { /* eDp v1.4 or higher */
3932 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3935 intel_dp_dpcd_read_wake(&intel_dp->aux,
3936 DP_SUPPORTED_LINK_RATES,
3938 sizeof(sink_rates));
3940 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3941 int val = le16_to_cpu(sink_rates[i]);
3946 /* Value read is in kHz while drm clock is saved in deca-kHz */
3947 intel_dp->sink_rates[i] = (val * 200) / 10;
3949 intel_dp->num_sink_rates = i;
3952 intel_dp_print_rates(intel_dp);
3954 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3955 DP_DWN_STRM_PORT_PRESENT))
3956 return true; /* native DP sink */
3958 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3959 return true; /* no per-port downstream info */
3961 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3962 intel_dp->downstream_ports,
3963 DP_MAX_DOWNSTREAM_PORTS) < 0)
3964 return false; /* downstream port status fetch failed */
3970 intel_dp_probe_oui(struct intel_dp *intel_dp)
3974 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3977 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3978 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3979 buf[0], buf[1], buf[2]);
3981 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3982 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3983 buf[0], buf[1], buf[2]);
3987 intel_dp_probe_mst(struct intel_dp *intel_dp)
3991 if (!intel_dp->can_mst)
3994 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3997 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3998 if (buf[0] & DP_MST_CAP) {
3999 DRM_DEBUG_KMS("Sink is MST capable\n");
4000 intel_dp->is_mst = true;
4002 DRM_DEBUG_KMS("Sink is not MST capable\n");
4003 intel_dp->is_mst = false;
4007 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4008 return intel_dp->is_mst;
4011 static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4013 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4014 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4017 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4018 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4022 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4023 buf & ~DP_TEST_SINK_START) < 0)
4024 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4026 hsw_enable_ips(intel_crtc);
4029 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4031 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4032 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4035 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4038 if (!(buf & DP_TEST_CRC_SUPPORTED))
4041 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4044 hsw_disable_ips(intel_crtc);
4046 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4047 buf | DP_TEST_SINK_START) < 0) {
4048 hsw_enable_ips(intel_crtc);
4055 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4057 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4058 struct drm_device *dev = dig_port->base.base.dev;
4059 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4065 ret = intel_dp_sink_crc_start(intel_dp);
4069 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4074 test_crc_count = buf & DP_TEST_COUNT_MASK;
4077 if (drm_dp_dpcd_readb(&intel_dp->aux,
4078 DP_TEST_SINK_MISC, &buf) < 0) {
4082 intel_wait_for_vblank(dev, intel_crtc->pipe);
4083 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4085 if (attempts == 0) {
4086 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4091 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4094 intel_dp_sink_crc_stop(intel_dp);
4099 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4101 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4102 DP_DEVICE_SERVICE_IRQ_VECTOR,
4103 sink_irq_vector, 1) == 1;
4107 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4111 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4113 sink_irq_vector, 14);
4120 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4122 uint8_t test_result = DP_TEST_ACK;
4126 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4128 uint8_t test_result = DP_TEST_NAK;
4132 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4134 uint8_t test_result = DP_TEST_NAK;
4135 struct intel_connector *intel_connector = intel_dp->attached_connector;
4136 struct drm_connector *connector = &intel_connector->base;
4138 if (intel_connector->detect_edid == NULL ||
4139 connector->edid_corrupt ||
4140 intel_dp->aux.i2c_defer_count > 6) {
4141 /* Check EDID read for NACKs, DEFERs and corruption
4142 * (DP CTS 1.2 Core r1.1)
4143 * 4.2.2.4 : Failed EDID read, I2C_NAK
4144 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4145 * 4.2.2.6 : EDID corruption detected
4146 * Use failsafe mode for all cases
4148 if (intel_dp->aux.i2c_nack_count > 0 ||
4149 intel_dp->aux.i2c_defer_count > 0)
4150 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4151 intel_dp->aux.i2c_nack_count,
4152 intel_dp->aux.i2c_defer_count);
4153 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4155 struct edid *block = intel_connector->detect_edid;
4157 /* We have to write the checksum
4158 * of the last block read
4160 block += intel_connector->detect_edid->extensions;
4162 if (!drm_dp_dpcd_write(&intel_dp->aux,
4163 DP_TEST_EDID_CHECKSUM,
4166 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4168 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4169 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4172 /* Set test active flag here so userspace doesn't interrupt things */
4173 intel_dp->compliance_test_active = 1;
4178 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4180 uint8_t test_result = DP_TEST_NAK;
4184 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4186 uint8_t response = DP_TEST_NAK;
4190 intel_dp->compliance_test_active = 0;
4191 intel_dp->compliance_test_type = 0;
4192 intel_dp->compliance_test_data = 0;
4194 intel_dp->aux.i2c_nack_count = 0;
4195 intel_dp->aux.i2c_defer_count = 0;
4197 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4199 DRM_DEBUG_KMS("Could not read test request from sink\n");
4204 case DP_TEST_LINK_TRAINING:
4205 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4206 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4207 response = intel_dp_autotest_link_training(intel_dp);
4209 case DP_TEST_LINK_VIDEO_PATTERN:
4210 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4211 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4212 response = intel_dp_autotest_video_pattern(intel_dp);
4214 case DP_TEST_LINK_EDID_READ:
4215 DRM_DEBUG_KMS("EDID test requested\n");
4216 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4217 response = intel_dp_autotest_edid(intel_dp);
4219 case DP_TEST_LINK_PHY_TEST_PATTERN:
4220 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4221 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4222 response = intel_dp_autotest_phy_pattern(intel_dp);
4225 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4230 status = drm_dp_dpcd_write(&intel_dp->aux,
4234 DRM_DEBUG_KMS("Could not write test response to sink\n");
4238 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4242 if (intel_dp->is_mst) {
4247 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4251 /* check link status - esi[10] = 0x200c */
4252 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4253 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4254 intel_dp_start_link_train(intel_dp);
4255 intel_dp_complete_link_train(intel_dp);
4256 intel_dp_stop_link_train(intel_dp);
4259 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4260 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4263 for (retry = 0; retry < 3; retry++) {
4265 wret = drm_dp_dpcd_write(&intel_dp->aux,
4266 DP_SINK_COUNT_ESI+1,
4273 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4275 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4283 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4284 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4285 intel_dp->is_mst = false;
4286 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4287 /* send a hotplug event */
4288 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4295 * According to DP spec
4298 * 2. Configure link according to Receiver Capabilities
4299 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4300 * 4. Check link status on receipt of hot-plug interrupt
4303 intel_dp_check_link_status(struct intel_dp *intel_dp)
4305 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4306 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4308 u8 link_status[DP_LINK_STATUS_SIZE];
4310 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4312 if (!intel_encoder->base.crtc)
4315 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4318 /* Try to read receiver status if the link appears to be up */
4319 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4323 /* Now read the DPCD to see if it's actually running */
4324 if (!intel_dp_get_dpcd(intel_dp)) {
4328 /* Try to read the source of the interrupt */
4329 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4330 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4331 /* Clear interrupt source */
4332 drm_dp_dpcd_writeb(&intel_dp->aux,
4333 DP_DEVICE_SERVICE_IRQ_VECTOR,
4336 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4337 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4338 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4339 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4342 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4343 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4344 intel_encoder->base.name);
4345 intel_dp_start_link_train(intel_dp);
4346 intel_dp_complete_link_train(intel_dp);
4347 intel_dp_stop_link_train(intel_dp);
4351 /* XXX this is probably wrong for multiple downstream ports */
4352 static enum drm_connector_status
4353 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4355 uint8_t *dpcd = intel_dp->dpcd;
4358 if (!intel_dp_get_dpcd(intel_dp))
4359 return connector_status_disconnected;
4361 /* if there's no downstream port, we're done */
4362 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4363 return connector_status_connected;
4365 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4366 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4367 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4370 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4372 return connector_status_unknown;
4374 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4375 : connector_status_disconnected;
4378 /* If no HPD, poke DDC gently */
4379 if (drm_probe_ddc(&intel_dp->aux.ddc))
4380 return connector_status_connected;
4382 /* Well we tried, say unknown for unreliable port types */
4383 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4384 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4385 if (type == DP_DS_PORT_TYPE_VGA ||
4386 type == DP_DS_PORT_TYPE_NON_EDID)
4387 return connector_status_unknown;
4389 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4390 DP_DWN_STRM_PORT_TYPE_MASK;
4391 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4392 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4393 return connector_status_unknown;
4396 /* Anything else is out of spec, warn and ignore */
4397 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4398 return connector_status_disconnected;
4401 static enum drm_connector_status
4402 edp_detect(struct intel_dp *intel_dp)
4404 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4405 enum drm_connector_status status;
4407 status = intel_panel_detect(dev);
4408 if (status == connector_status_unknown)
4409 status = connector_status_connected;
4414 static enum drm_connector_status
4415 ironlake_dp_detect(struct intel_dp *intel_dp)
4417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418 struct drm_i915_private *dev_priv = dev->dev_private;
4419 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4421 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4422 return connector_status_disconnected;
4424 return intel_dp_detect_dpcd(intel_dp);
4427 static int g4x_digital_port_connected(struct drm_device *dev,
4428 struct intel_digital_port *intel_dig_port)
4430 struct drm_i915_private *dev_priv = dev->dev_private;
4433 if (IS_VALLEYVIEW(dev)) {
4434 switch (intel_dig_port->port) {
4436 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4439 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4442 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4448 switch (intel_dig_port->port) {
4450 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4453 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4456 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4463 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4468 static enum drm_connector_status
4469 g4x_dp_detect(struct intel_dp *intel_dp)
4471 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4472 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4475 /* Can't disconnect eDP, but you can close the lid... */
4476 if (is_edp(intel_dp)) {
4477 enum drm_connector_status status;
4479 status = intel_panel_detect(dev);
4480 if (status == connector_status_unknown)
4481 status = connector_status_connected;
4485 ret = g4x_digital_port_connected(dev, intel_dig_port);
4487 return connector_status_unknown;
4489 return connector_status_disconnected;
4491 return intel_dp_detect_dpcd(intel_dp);
4494 static struct edid *
4495 intel_dp_get_edid(struct intel_dp *intel_dp)
4497 struct intel_connector *intel_connector = intel_dp->attached_connector;
4499 /* use cached edid if we have one */
4500 if (intel_connector->edid) {
4502 if (IS_ERR(intel_connector->edid))
4505 return drm_edid_duplicate(intel_connector->edid);
4507 return drm_get_edid(&intel_connector->base,
4508 &intel_dp->aux.ddc);
4512 intel_dp_set_edid(struct intel_dp *intel_dp)
4514 struct intel_connector *intel_connector = intel_dp->attached_connector;
4517 edid = intel_dp_get_edid(intel_dp);
4518 intel_connector->detect_edid = edid;
4520 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4521 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4523 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4527 intel_dp_unset_edid(struct intel_dp *intel_dp)
4529 struct intel_connector *intel_connector = intel_dp->attached_connector;
4531 kfree(intel_connector->detect_edid);
4532 intel_connector->detect_edid = NULL;
4534 intel_dp->has_audio = false;
4537 static enum intel_display_power_domain
4538 intel_dp_power_get(struct intel_dp *dp)
4540 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4541 enum intel_display_power_domain power_domain;
4543 power_domain = intel_display_port_power_domain(encoder);
4544 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4546 return power_domain;
4550 intel_dp_power_put(struct intel_dp *dp,
4551 enum intel_display_power_domain power_domain)
4553 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4554 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4557 static enum drm_connector_status
4558 intel_dp_detect(struct drm_connector *connector, bool force)
4560 struct intel_dp *intel_dp = intel_attached_dp(connector);
4561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4562 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4563 struct drm_device *dev = connector->dev;
4564 enum drm_connector_status status;
4565 enum intel_display_power_domain power_domain;
4569 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4570 connector->base.id, connector->name);
4571 intel_dp_unset_edid(intel_dp);
4573 if (intel_dp->is_mst) {
4574 /* MST devices are disconnected from a monitor POV */
4575 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4576 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4577 return connector_status_disconnected;
4580 power_domain = intel_dp_power_get(intel_dp);
4582 /* Can't disconnect eDP, but you can close the lid... */
4583 if (is_edp(intel_dp))
4584 status = edp_detect(intel_dp);
4585 else if (HAS_PCH_SPLIT(dev))
4586 status = ironlake_dp_detect(intel_dp);
4588 status = g4x_dp_detect(intel_dp);
4589 if (status != connector_status_connected)
4592 intel_dp_probe_oui(intel_dp);
4594 ret = intel_dp_probe_mst(intel_dp);
4596 /* if we are in MST mode then this connector
4597 won't appear connected or have anything with EDID on it */
4598 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4599 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4600 status = connector_status_disconnected;
4604 intel_dp_set_edid(intel_dp);
4606 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4607 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4608 status = connector_status_connected;
4610 /* Try to read the source of the interrupt */
4611 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4612 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4613 /* Clear interrupt source */
4614 drm_dp_dpcd_writeb(&intel_dp->aux,
4615 DP_DEVICE_SERVICE_IRQ_VECTOR,
4618 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4619 intel_dp_handle_test_request(intel_dp);
4620 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4621 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4625 intel_dp_power_put(intel_dp, power_domain);
4630 intel_dp_force(struct drm_connector *connector)
4632 struct intel_dp *intel_dp = intel_attached_dp(connector);
4633 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4634 enum intel_display_power_domain power_domain;
4636 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4637 connector->base.id, connector->name);
4638 intel_dp_unset_edid(intel_dp);
4640 if (connector->status != connector_status_connected)
4643 power_domain = intel_dp_power_get(intel_dp);
4645 intel_dp_set_edid(intel_dp);
4647 intel_dp_power_put(intel_dp, power_domain);
4649 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4650 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4653 static int intel_dp_get_modes(struct drm_connector *connector)
4655 struct intel_connector *intel_connector = to_intel_connector(connector);
4658 edid = intel_connector->detect_edid;
4660 int ret = intel_connector_update_modes(connector, edid);
4665 /* if eDP has no EDID, fall back to fixed mode */
4666 if (is_edp(intel_attached_dp(connector)) &&
4667 intel_connector->panel.fixed_mode) {
4668 struct drm_display_mode *mode;
4670 mode = drm_mode_duplicate(connector->dev,
4671 intel_connector->panel.fixed_mode);
4673 drm_mode_probed_add(connector, mode);
4682 intel_dp_detect_audio(struct drm_connector *connector)
4684 bool has_audio = false;
4687 edid = to_intel_connector(connector)->detect_edid;
4689 has_audio = drm_detect_monitor_audio(edid);
4695 intel_dp_set_property(struct drm_connector *connector,
4696 struct drm_property *property,
4699 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4700 struct intel_connector *intel_connector = to_intel_connector(connector);
4701 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4702 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4705 ret = drm_object_property_set_value(&connector->base, property, val);
4709 if (property == dev_priv->force_audio_property) {
4713 if (i == intel_dp->force_audio)
4716 intel_dp->force_audio = i;
4718 if (i == HDMI_AUDIO_AUTO)
4719 has_audio = intel_dp_detect_audio(connector);
4721 has_audio = (i == HDMI_AUDIO_ON);
4723 if (has_audio == intel_dp->has_audio)
4726 intel_dp->has_audio = has_audio;
4730 if (property == dev_priv->broadcast_rgb_property) {
4731 bool old_auto = intel_dp->color_range_auto;
4732 uint32_t old_range = intel_dp->color_range;
4735 case INTEL_BROADCAST_RGB_AUTO:
4736 intel_dp->color_range_auto = true;
4738 case INTEL_BROADCAST_RGB_FULL:
4739 intel_dp->color_range_auto = false;
4740 intel_dp->color_range = 0;
4742 case INTEL_BROADCAST_RGB_LIMITED:
4743 intel_dp->color_range_auto = false;
4744 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4750 if (old_auto == intel_dp->color_range_auto &&
4751 old_range == intel_dp->color_range)
4757 if (is_edp(intel_dp) &&
4758 property == connector->dev->mode_config.scaling_mode_property) {
4759 if (val == DRM_MODE_SCALE_NONE) {
4760 DRM_DEBUG_KMS("no scaling not supported\n");
4764 if (intel_connector->panel.fitting_mode == val) {
4765 /* the eDP scaling property is not changed */
4768 intel_connector->panel.fitting_mode = val;
4776 if (intel_encoder->base.crtc)
4777 intel_crtc_restore_mode(intel_encoder->base.crtc);
4783 intel_dp_connector_destroy(struct drm_connector *connector)
4785 struct intel_connector *intel_connector = to_intel_connector(connector);
4787 kfree(intel_connector->detect_edid);
4789 if (!IS_ERR_OR_NULL(intel_connector->edid))
4790 kfree(intel_connector->edid);
4792 /* Can't call is_edp() since the encoder may have been destroyed
4794 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4795 intel_panel_fini(&intel_connector->panel);
4797 drm_connector_cleanup(connector);
4801 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4803 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4804 struct intel_dp *intel_dp = &intel_dig_port->dp;
4806 drm_dp_aux_unregister(&intel_dp->aux);
4807 intel_dp_mst_encoder_cleanup(intel_dig_port);
4808 if (is_edp(intel_dp)) {
4809 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4811 * vdd might still be enabled do to the delayed vdd off.
4812 * Make sure vdd is actually turned off here.
4815 edp_panel_vdd_off_sync(intel_dp);
4816 pps_unlock(intel_dp);
4818 if (intel_dp->edp_notifier.notifier_call) {
4819 unregister_reboot_notifier(&intel_dp->edp_notifier);
4820 intel_dp->edp_notifier.notifier_call = NULL;
4823 drm_encoder_cleanup(encoder);
4824 kfree(intel_dig_port);
4827 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4829 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4831 if (!is_edp(intel_dp))
4835 * vdd might still be enabled do to the delayed vdd off.
4836 * Make sure vdd is actually turned off here.
4838 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4840 edp_panel_vdd_off_sync(intel_dp);
4841 pps_unlock(intel_dp);
4844 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4846 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4847 struct drm_device *dev = intel_dig_port->base.base.dev;
4848 struct drm_i915_private *dev_priv = dev->dev_private;
4849 enum intel_display_power_domain power_domain;
4851 lockdep_assert_held(&dev_priv->pps_mutex);
4853 if (!edp_have_panel_vdd(intel_dp))
4857 * The VDD bit needs a power domain reference, so if the bit is
4858 * already enabled when we boot or resume, grab this reference and
4859 * schedule a vdd off, so we don't hold on to the reference
4862 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4863 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4864 intel_display_power_get(dev_priv, power_domain);
4866 edp_panel_vdd_schedule_off(intel_dp);
4869 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4871 struct intel_dp *intel_dp;
4873 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4876 intel_dp = enc_to_intel_dp(encoder);
4881 * Read out the current power sequencer assignment,
4882 * in case the BIOS did something with it.
4884 if (IS_VALLEYVIEW(encoder->dev))
4885 vlv_initial_power_sequencer_setup(intel_dp);
4887 intel_edp_panel_vdd_sanitize(intel_dp);
4889 pps_unlock(intel_dp);
4892 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4893 .dpms = drm_atomic_helper_connector_dpms,
4894 .detect = intel_dp_detect,
4895 .force = intel_dp_force,
4896 .fill_modes = drm_helper_probe_single_connector_modes,
4897 .set_property = intel_dp_set_property,
4898 .atomic_get_property = intel_connector_atomic_get_property,
4899 .destroy = intel_dp_connector_destroy,
4900 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4901 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4904 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4905 .get_modes = intel_dp_get_modes,
4906 .mode_valid = intel_dp_mode_valid,
4907 .best_encoder = intel_best_encoder,
4910 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4911 .reset = intel_dp_encoder_reset,
4912 .destroy = intel_dp_encoder_destroy,
4916 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4918 struct intel_dp *intel_dp = &intel_dig_port->dp;
4919 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4920 struct drm_device *dev = intel_dig_port->base.base.dev;
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922 enum intel_display_power_domain power_domain;
4923 enum irqreturn ret = IRQ_NONE;
4925 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4926 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4928 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4930 * vdd off can generate a long pulse on eDP which
4931 * would require vdd on to handle it, and thus we
4932 * would end up in an endless cycle of
4933 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4935 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4936 port_name(intel_dig_port->port));
4940 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4941 port_name(intel_dig_port->port),
4942 long_hpd ? "long" : "short");
4944 power_domain = intel_display_port_power_domain(intel_encoder);
4945 intel_display_power_get(dev_priv, power_domain);
4948 /* indicate that we need to restart link training */
4949 intel_dp->train_set_valid = false;
4951 if (HAS_PCH_SPLIT(dev)) {
4952 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4955 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4959 if (!intel_dp_get_dpcd(intel_dp)) {
4963 intel_dp_probe_oui(intel_dp);
4965 if (!intel_dp_probe_mst(intel_dp)) {
4966 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4967 intel_dp_check_link_status(intel_dp);
4968 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4972 if (intel_dp->is_mst) {
4973 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4977 if (!intel_dp->is_mst) {
4978 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4979 intel_dp_check_link_status(intel_dp);
4980 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4988 /* if we were in MST mode, and device is not there get out of MST mode */
4989 if (intel_dp->is_mst) {
4990 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4991 intel_dp->is_mst = false;
4992 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4995 intel_display_power_put(dev_priv, power_domain);
5000 /* Return which DP Port should be selected for Transcoder DP control */
5002 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5004 struct drm_device *dev = crtc->dev;
5005 struct intel_encoder *intel_encoder;
5006 struct intel_dp *intel_dp;
5008 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5009 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5011 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5012 intel_encoder->type == INTEL_OUTPUT_EDP)
5013 return intel_dp->output_reg;
5019 /* check the VBT to see whether the eDP is on another port */
5020 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5022 struct drm_i915_private *dev_priv = dev->dev_private;
5023 union child_device_config *p_child;
5025 static const short port_mapping[] = {
5026 [PORT_B] = DVO_PORT_DPB,
5027 [PORT_C] = DVO_PORT_DPC,
5028 [PORT_D] = DVO_PORT_DPD,
5029 [PORT_E] = DVO_PORT_DPE,
5035 if (!dev_priv->vbt.child_dev_num)
5038 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5039 p_child = dev_priv->vbt.child_dev + i;
5041 if (p_child->common.dvo_port == port_mapping[port] &&
5042 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5043 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5050 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5052 struct intel_connector *intel_connector = to_intel_connector(connector);
5054 intel_attach_force_audio_property(connector);
5055 intel_attach_broadcast_rgb_property(connector);
5056 intel_dp->color_range_auto = true;
5058 if (is_edp(intel_dp)) {
5059 drm_mode_create_scaling_mode_property(connector->dev);
5060 drm_object_attach_property(
5062 connector->dev->mode_config.scaling_mode_property,
5063 DRM_MODE_SCALE_ASPECT);
5064 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5068 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5070 intel_dp->last_power_cycle = jiffies;
5071 intel_dp->last_power_on = jiffies;
5072 intel_dp->last_backlight_off = jiffies;
5076 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5077 struct intel_dp *intel_dp)
5079 struct drm_i915_private *dev_priv = dev->dev_private;
5080 struct edp_power_seq cur, vbt, spec,
5081 *final = &intel_dp->pps_delays;
5082 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5083 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5085 lockdep_assert_held(&dev_priv->pps_mutex);
5087 /* already initialized? */
5088 if (final->t11_t12 != 0)
5091 if (IS_BROXTON(dev)) {
5093 * TODO: BXT has 2 sets of PPS registers.
5094 * Correct Register for Broxton need to be identified
5095 * using VBT. hardcoding for now
5097 pp_ctrl_reg = BXT_PP_CONTROL(0);
5098 pp_on_reg = BXT_PP_ON_DELAYS(0);
5099 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5100 } else if (HAS_PCH_SPLIT(dev)) {
5101 pp_ctrl_reg = PCH_PP_CONTROL;
5102 pp_on_reg = PCH_PP_ON_DELAYS;
5103 pp_off_reg = PCH_PP_OFF_DELAYS;
5104 pp_div_reg = PCH_PP_DIVISOR;
5106 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5108 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5109 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5110 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5111 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5114 /* Workaround: Need to write PP_CONTROL with the unlock key as
5115 * the very first thing. */
5116 pp_ctl = ironlake_get_pp_control(intel_dp);
5118 pp_on = I915_READ(pp_on_reg);
5119 pp_off = I915_READ(pp_off_reg);
5120 if (!IS_BROXTON(dev)) {
5121 I915_WRITE(pp_ctrl_reg, pp_ctl);
5122 pp_div = I915_READ(pp_div_reg);
5125 /* Pull timing values out of registers */
5126 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5127 PANEL_POWER_UP_DELAY_SHIFT;
5129 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5130 PANEL_LIGHT_ON_DELAY_SHIFT;
5132 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5133 PANEL_LIGHT_OFF_DELAY_SHIFT;
5135 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5136 PANEL_POWER_DOWN_DELAY_SHIFT;
5138 if (IS_BROXTON(dev)) {
5139 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5140 BXT_POWER_CYCLE_DELAY_SHIFT;
5142 cur.t11_t12 = (tmp - 1) * 1000;
5146 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5147 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5150 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5151 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5153 vbt = dev_priv->vbt.edp_pps;
5155 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5156 * our hw here, which are all in 100usec. */
5157 spec.t1_t3 = 210 * 10;
5158 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5159 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5160 spec.t10 = 500 * 10;
5161 /* This one is special and actually in units of 100ms, but zero
5162 * based in the hw (so we need to add 100 ms). But the sw vbt
5163 * table multiplies it with 1000 to make it in units of 100usec,
5165 spec.t11_t12 = (510 + 100) * 10;
5167 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5168 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5170 /* Use the max of the register settings and vbt. If both are
5171 * unset, fall back to the spec limits. */
5172 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5174 max(cur.field, vbt.field))
5175 assign_final(t1_t3);
5179 assign_final(t11_t12);
5182 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5183 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5184 intel_dp->backlight_on_delay = get_delay(t8);
5185 intel_dp->backlight_off_delay = get_delay(t9);
5186 intel_dp->panel_power_down_delay = get_delay(t10);
5187 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5190 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5191 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5192 intel_dp->panel_power_cycle_delay);
5194 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5195 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5199 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5200 struct intel_dp *intel_dp)
5202 struct drm_i915_private *dev_priv = dev->dev_private;
5203 u32 pp_on, pp_off, pp_div, port_sel = 0;
5204 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5205 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5206 enum port port = dp_to_dig_port(intel_dp)->port;
5207 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5209 lockdep_assert_held(&dev_priv->pps_mutex);
5211 if (IS_BROXTON(dev)) {
5213 * TODO: BXT has 2 sets of PPS registers.
5214 * Correct Register for Broxton need to be identified
5215 * using VBT. hardcoding for now
5217 pp_ctrl_reg = BXT_PP_CONTROL(0);
5218 pp_on_reg = BXT_PP_ON_DELAYS(0);
5219 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5221 } else if (HAS_PCH_SPLIT(dev)) {
5222 pp_on_reg = PCH_PP_ON_DELAYS;
5223 pp_off_reg = PCH_PP_OFF_DELAYS;
5224 pp_div_reg = PCH_PP_DIVISOR;
5226 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5228 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5229 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5230 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5234 * And finally store the new values in the power sequencer. The
5235 * backlight delays are set to 1 because we do manual waits on them. For
5236 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5237 * we'll end up waiting for the backlight off delay twice: once when we
5238 * do the manual sleep, and once when we disable the panel and wait for
5239 * the PP_STATUS bit to become zero.
5241 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5242 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5243 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5244 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5245 /* Compute the divisor for the pp clock, simply match the Bspec
5247 if (IS_BROXTON(dev)) {
5248 pp_div = I915_READ(pp_ctrl_reg);
5249 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5250 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5251 << BXT_POWER_CYCLE_DELAY_SHIFT);
5253 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5254 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5255 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5258 /* Haswell doesn't have any port selection bits for the panel
5259 * power sequencer any more. */
5260 if (IS_VALLEYVIEW(dev)) {
5261 port_sel = PANEL_PORT_SELECT_VLV(port);
5262 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5264 port_sel = PANEL_PORT_SELECT_DPA;
5266 port_sel = PANEL_PORT_SELECT_DPD;
5271 I915_WRITE(pp_on_reg, pp_on);
5272 I915_WRITE(pp_off_reg, pp_off);
5273 if (IS_BROXTON(dev))
5274 I915_WRITE(pp_ctrl_reg, pp_div);
5276 I915_WRITE(pp_div_reg, pp_div);
5278 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5279 I915_READ(pp_on_reg),
5280 I915_READ(pp_off_reg),
5282 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5283 I915_READ(pp_div_reg));
5287 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5289 * @refresh_rate: RR to be programmed
5291 * This function gets called when refresh rate (RR) has to be changed from
5292 * one frequency to another. Switches can be between high and low RR
5293 * supported by the panel or to any other RR based on media playback (in
5294 * this case, RR value needs to be passed from user space).
5296 * The caller of this function needs to take a lock on dev_priv->drrs.
5298 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5300 struct drm_i915_private *dev_priv = dev->dev_private;
5301 struct intel_encoder *encoder;
5302 struct intel_digital_port *dig_port = NULL;
5303 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5304 struct intel_crtc_state *config = NULL;
5305 struct intel_crtc *intel_crtc = NULL;
5307 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5309 if (refresh_rate <= 0) {
5310 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5314 if (intel_dp == NULL) {
5315 DRM_DEBUG_KMS("DRRS not supported.\n");
5320 * FIXME: This needs proper synchronization with psr state for some
5321 * platforms that cannot have PSR and DRRS enabled at the same time.
5324 dig_port = dp_to_dig_port(intel_dp);
5325 encoder = &dig_port->base;
5326 intel_crtc = to_intel_crtc(encoder->base.crtc);
5329 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5333 config = intel_crtc->config;
5335 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5336 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5340 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5342 index = DRRS_LOW_RR;
5344 if (index == dev_priv->drrs.refresh_rate_type) {
5346 "DRRS requested for previously set RR...ignoring\n");
5350 if (!intel_crtc->active) {
5351 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5355 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5358 intel_dp_set_m_n(intel_crtc, M1_N1);
5361 intel_dp_set_m_n(intel_crtc, M2_N2);
5365 DRM_ERROR("Unsupported refreshrate type\n");
5367 } else if (INTEL_INFO(dev)->gen > 6) {
5368 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5369 val = I915_READ(reg);
5371 if (index > DRRS_HIGH_RR) {
5372 if (IS_VALLEYVIEW(dev))
5373 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5375 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5377 if (IS_VALLEYVIEW(dev))
5378 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5380 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5382 I915_WRITE(reg, val);
5385 dev_priv->drrs.refresh_rate_type = index;
5387 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5391 * intel_edp_drrs_enable - init drrs struct if supported
5392 * @intel_dp: DP struct
5394 * Initializes frontbuffer_bits and drrs.dp
5396 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5398 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5399 struct drm_i915_private *dev_priv = dev->dev_private;
5400 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5401 struct drm_crtc *crtc = dig_port->base.base.crtc;
5402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5404 if (!intel_crtc->config->has_drrs) {
5405 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5409 mutex_lock(&dev_priv->drrs.mutex);
5410 if (WARN_ON(dev_priv->drrs.dp)) {
5411 DRM_ERROR("DRRS already enabled\n");
5415 dev_priv->drrs.busy_frontbuffer_bits = 0;
5417 dev_priv->drrs.dp = intel_dp;
5420 mutex_unlock(&dev_priv->drrs.mutex);
5424 * intel_edp_drrs_disable - Disable DRRS
5425 * @intel_dp: DP struct
5428 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5431 struct drm_i915_private *dev_priv = dev->dev_private;
5432 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5433 struct drm_crtc *crtc = dig_port->base.base.crtc;
5434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5436 if (!intel_crtc->config->has_drrs)
5439 mutex_lock(&dev_priv->drrs.mutex);
5440 if (!dev_priv->drrs.dp) {
5441 mutex_unlock(&dev_priv->drrs.mutex);
5445 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5446 intel_dp_set_drrs_state(dev_priv->dev,
5447 intel_dp->attached_connector->panel.
5448 fixed_mode->vrefresh);
5450 dev_priv->drrs.dp = NULL;
5451 mutex_unlock(&dev_priv->drrs.mutex);
5453 cancel_delayed_work_sync(&dev_priv->drrs.work);
5456 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5458 struct drm_i915_private *dev_priv =
5459 container_of(work, typeof(*dev_priv), drrs.work.work);
5460 struct intel_dp *intel_dp;
5462 mutex_lock(&dev_priv->drrs.mutex);
5464 intel_dp = dev_priv->drrs.dp;
5470 * The delayed work can race with an invalidate hence we need to
5474 if (dev_priv->drrs.busy_frontbuffer_bits)
5477 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5478 intel_dp_set_drrs_state(dev_priv->dev,
5479 intel_dp->attached_connector->panel.
5480 downclock_mode->vrefresh);
5483 mutex_unlock(&dev_priv->drrs.mutex);
5487 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5489 * @frontbuffer_bits: frontbuffer plane tracking bits
5491 * This function gets called everytime rendering on the given planes start.
5492 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5494 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5496 void intel_edp_drrs_invalidate(struct drm_device *dev,
5497 unsigned frontbuffer_bits)
5499 struct drm_i915_private *dev_priv = dev->dev_private;
5500 struct drm_crtc *crtc;
5503 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5506 cancel_delayed_work(&dev_priv->drrs.work);
5508 mutex_lock(&dev_priv->drrs.mutex);
5509 if (!dev_priv->drrs.dp) {
5510 mutex_unlock(&dev_priv->drrs.mutex);
5514 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5515 pipe = to_intel_crtc(crtc)->pipe;
5517 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5518 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5520 /* invalidate means busy screen hence upclock */
5521 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5522 intel_dp_set_drrs_state(dev_priv->dev,
5523 dev_priv->drrs.dp->attached_connector->panel.
5524 fixed_mode->vrefresh);
5526 mutex_unlock(&dev_priv->drrs.mutex);
5530 * intel_edp_drrs_flush - Restart Idleness DRRS
5532 * @frontbuffer_bits: frontbuffer plane tracking bits
5534 * This function gets called every time rendering on the given planes has
5535 * completed or flip on a crtc is completed. So DRRS should be upclocked
5536 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5537 * if no other planes are dirty.
5539 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5541 void intel_edp_drrs_flush(struct drm_device *dev,
5542 unsigned frontbuffer_bits)
5544 struct drm_i915_private *dev_priv = dev->dev_private;
5545 struct drm_crtc *crtc;
5548 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5551 cancel_delayed_work(&dev_priv->drrs.work);
5553 mutex_lock(&dev_priv->drrs.mutex);
5554 if (!dev_priv->drrs.dp) {
5555 mutex_unlock(&dev_priv->drrs.mutex);
5559 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5560 pipe = to_intel_crtc(crtc)->pipe;
5562 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5563 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5565 /* flush means busy screen hence upclock */
5566 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5567 intel_dp_set_drrs_state(dev_priv->dev,
5568 dev_priv->drrs.dp->attached_connector->panel.
5569 fixed_mode->vrefresh);
5572 * flush also means no more activity hence schedule downclock, if all
5573 * other fbs are quiescent too
5575 if (!dev_priv->drrs.busy_frontbuffer_bits)
5576 schedule_delayed_work(&dev_priv->drrs.work,
5577 msecs_to_jiffies(1000));
5578 mutex_unlock(&dev_priv->drrs.mutex);
5582 * DOC: Display Refresh Rate Switching (DRRS)
5584 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5585 * which enables swtching between low and high refresh rates,
5586 * dynamically, based on the usage scenario. This feature is applicable
5587 * for internal panels.
5589 * Indication that the panel supports DRRS is given by the panel EDID, which
5590 * would list multiple refresh rates for one resolution.
5592 * DRRS is of 2 types - static and seamless.
5593 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5594 * (may appear as a blink on screen) and is used in dock-undock scenario.
5595 * Seamless DRRS involves changing RR without any visual effect to the user
5596 * and can be used during normal system usage. This is done by programming
5597 * certain registers.
5599 * Support for static/seamless DRRS may be indicated in the VBT based on
5600 * inputs from the panel spec.
5602 * DRRS saves power by switching to low RR based on usage scenarios.
5605 * The implementation is based on frontbuffer tracking implementation.
5606 * When there is a disturbance on the screen triggered by user activity or a
5607 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5608 * When there is no movement on screen, after a timeout of 1 second, a switch
5609 * to low RR is made.
5610 * For integration with frontbuffer tracking code,
5611 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5613 * DRRS can be further extended to support other internal panels and also
5614 * the scenario of video playback wherein RR is set based on the rate
5615 * requested by userspace.
5619 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5620 * @intel_connector: eDP connector
5621 * @fixed_mode: preferred mode of panel
5623 * This function is called only once at driver load to initialize basic
5627 * Downclock mode if panel supports it, else return NULL.
5628 * DRRS support is determined by the presence of downclock mode (apart
5629 * from VBT setting).
5631 static struct drm_display_mode *
5632 intel_dp_drrs_init(struct intel_connector *intel_connector,
5633 struct drm_display_mode *fixed_mode)
5635 struct drm_connector *connector = &intel_connector->base;
5636 struct drm_device *dev = connector->dev;
5637 struct drm_i915_private *dev_priv = dev->dev_private;
5638 struct drm_display_mode *downclock_mode = NULL;
5640 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5641 mutex_init(&dev_priv->drrs.mutex);
5643 if (INTEL_INFO(dev)->gen <= 6) {
5644 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5648 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5649 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5653 downclock_mode = intel_find_panel_downclock
5654 (dev, fixed_mode, connector);
5656 if (!downclock_mode) {
5657 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5661 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5663 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5664 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5665 return downclock_mode;
5668 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5669 struct intel_connector *intel_connector)
5671 struct drm_connector *connector = &intel_connector->base;
5672 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5673 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5674 struct drm_device *dev = intel_encoder->base.dev;
5675 struct drm_i915_private *dev_priv = dev->dev_private;
5676 struct drm_display_mode *fixed_mode = NULL;
5677 struct drm_display_mode *downclock_mode = NULL;
5679 struct drm_display_mode *scan;
5681 enum pipe pipe = INVALID_PIPE;
5683 if (!is_edp(intel_dp))
5687 intel_edp_panel_vdd_sanitize(intel_dp);
5688 pps_unlock(intel_dp);
5690 /* Cache DPCD and EDID for edp. */
5691 has_dpcd = intel_dp_get_dpcd(intel_dp);
5694 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5695 dev_priv->no_aux_handshake =
5696 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5697 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5699 /* if this fails, presume the device is a ghost */
5700 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5704 /* We now know it's not a ghost, init power sequence regs. */
5706 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5707 pps_unlock(intel_dp);
5709 mutex_lock(&dev->mode_config.mutex);
5710 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5712 if (drm_add_edid_modes(connector, edid)) {
5713 drm_mode_connector_update_edid_property(connector,
5715 drm_edid_to_eld(connector, edid);
5718 edid = ERR_PTR(-EINVAL);
5721 edid = ERR_PTR(-ENOENT);
5723 intel_connector->edid = edid;
5725 /* prefer fixed mode from EDID if available */
5726 list_for_each_entry(scan, &connector->probed_modes, head) {
5727 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5728 fixed_mode = drm_mode_duplicate(dev, scan);
5729 downclock_mode = intel_dp_drrs_init(
5730 intel_connector, fixed_mode);
5735 /* fallback to VBT if available for eDP */
5736 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5737 fixed_mode = drm_mode_duplicate(dev,
5738 dev_priv->vbt.lfp_lvds_vbt_mode);
5740 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5742 mutex_unlock(&dev->mode_config.mutex);
5744 if (IS_VALLEYVIEW(dev)) {
5745 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5746 register_reboot_notifier(&intel_dp->edp_notifier);
5749 * Figure out the current pipe for the initial backlight setup.
5750 * If the current pipe isn't valid, try the PPS pipe, and if that
5751 * fails just assume pipe A.
5753 if (IS_CHERRYVIEW(dev))
5754 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5756 pipe = PORT_TO_PIPE(intel_dp->DP);
5758 if (pipe != PIPE_A && pipe != PIPE_B)
5759 pipe = intel_dp->pps_pipe;
5761 if (pipe != PIPE_A && pipe != PIPE_B)
5764 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5768 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5769 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5770 intel_panel_setup_backlight(connector, pipe);
5776 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5777 struct intel_connector *intel_connector)
5779 struct drm_connector *connector = &intel_connector->base;
5780 struct intel_dp *intel_dp = &intel_dig_port->dp;
5781 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5782 struct drm_device *dev = intel_encoder->base.dev;
5783 struct drm_i915_private *dev_priv = dev->dev_private;
5784 enum port port = intel_dig_port->port;
5787 intel_dp->pps_pipe = INVALID_PIPE;
5789 /* intel_dp vfuncs */
5790 if (INTEL_INFO(dev)->gen >= 9)
5791 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5792 else if (IS_VALLEYVIEW(dev))
5793 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5794 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5795 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5796 else if (HAS_PCH_SPLIT(dev))
5797 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5799 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5801 if (INTEL_INFO(dev)->gen >= 9)
5802 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5804 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5806 /* Preserve the current hw state. */
5807 intel_dp->DP = I915_READ(intel_dp->output_reg);
5808 intel_dp->attached_connector = intel_connector;
5810 if (intel_dp_is_edp(dev, port))
5811 type = DRM_MODE_CONNECTOR_eDP;
5813 type = DRM_MODE_CONNECTOR_DisplayPort;
5816 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5817 * for DP the encoder type can be set by the caller to
5818 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5820 if (type == DRM_MODE_CONNECTOR_eDP)
5821 intel_encoder->type = INTEL_OUTPUT_EDP;
5823 /* eDP only on port B and/or C on vlv/chv */
5824 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5825 port != PORT_B && port != PORT_C))
5828 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5829 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5832 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5833 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5835 connector->interlace_allowed = true;
5836 connector->doublescan_allowed = 0;
5838 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5839 edp_panel_vdd_work);
5841 intel_connector_attach_encoder(intel_connector, intel_encoder);
5842 drm_connector_register(connector);
5845 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5847 intel_connector->get_hw_state = intel_connector_get_hw_state;
5848 intel_connector->unregister = intel_dp_connector_unregister;
5850 /* Set up the hotplug pin. */
5853 intel_encoder->hpd_pin = HPD_PORT_A;
5856 intel_encoder->hpd_pin = HPD_PORT_B;
5859 intel_encoder->hpd_pin = HPD_PORT_C;
5862 intel_encoder->hpd_pin = HPD_PORT_D;
5865 intel_encoder->hpd_pin = HPD_PORT_E;
5871 if (is_edp(intel_dp)) {
5873 intel_dp_init_panel_power_timestamps(intel_dp);
5874 if (IS_VALLEYVIEW(dev))
5875 vlv_initial_power_sequencer_setup(intel_dp);
5877 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5878 pps_unlock(intel_dp);
5881 intel_dp_aux_init(intel_dp, intel_connector);
5883 /* init MST on ports that can support it */
5884 if (HAS_DP_MST(dev) &&
5885 (port == PORT_B || port == PORT_C || port == PORT_D))
5886 intel_dp_mst_encoder_init(intel_dig_port,
5887 intel_connector->base.base.id);
5889 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5890 drm_dp_aux_unregister(&intel_dp->aux);
5891 if (is_edp(intel_dp)) {
5892 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5894 * vdd might still be enabled do to the delayed vdd off.
5895 * Make sure vdd is actually turned off here.
5898 edp_panel_vdd_off_sync(intel_dp);
5899 pps_unlock(intel_dp);
5901 drm_connector_unregister(connector);
5902 drm_connector_cleanup(connector);
5906 intel_dp_add_properties(intel_dp, connector);
5908 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5909 * 0xd. Failure to do so will result in spurious interrupts being
5910 * generated on the port when a cable is not attached.
5912 if (IS_G4X(dev) && !IS_GM45(dev)) {
5913 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5914 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5917 i915_debugfs_connector_add(connector);
5923 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5925 struct drm_i915_private *dev_priv = dev->dev_private;
5926 struct intel_digital_port *intel_dig_port;
5927 struct intel_encoder *intel_encoder;
5928 struct drm_encoder *encoder;
5929 struct intel_connector *intel_connector;
5931 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5932 if (!intel_dig_port)
5935 intel_connector = intel_connector_alloc();
5936 if (!intel_connector) {
5937 kfree(intel_dig_port);
5941 intel_encoder = &intel_dig_port->base;
5942 encoder = &intel_encoder->base;
5944 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5945 DRM_MODE_ENCODER_TMDS);
5947 intel_encoder->compute_config = intel_dp_compute_config;
5948 intel_encoder->disable = intel_disable_dp;
5949 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5950 intel_encoder->get_config = intel_dp_get_config;
5951 intel_encoder->suspend = intel_dp_encoder_suspend;
5952 if (IS_CHERRYVIEW(dev)) {
5953 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5954 intel_encoder->pre_enable = chv_pre_enable_dp;
5955 intel_encoder->enable = vlv_enable_dp;
5956 intel_encoder->post_disable = chv_post_disable_dp;
5957 } else if (IS_VALLEYVIEW(dev)) {
5958 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5959 intel_encoder->pre_enable = vlv_pre_enable_dp;
5960 intel_encoder->enable = vlv_enable_dp;
5961 intel_encoder->post_disable = vlv_post_disable_dp;
5963 intel_encoder->pre_enable = g4x_pre_enable_dp;
5964 intel_encoder->enable = g4x_enable_dp;
5965 if (INTEL_INFO(dev)->gen >= 5)
5966 intel_encoder->post_disable = ilk_post_disable_dp;
5969 intel_dig_port->port = port;
5970 intel_dig_port->dp.output_reg = output_reg;
5972 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5973 if (IS_CHERRYVIEW(dev)) {
5975 intel_encoder->crtc_mask = 1 << 2;
5977 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5979 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5981 intel_encoder->cloneable = 0;
5983 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5984 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5986 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5987 drm_encoder_cleanup(encoder);
5988 kfree(intel_dig_port);
5989 kfree(intel_connector);
5993 void intel_dp_mst_suspend(struct drm_device *dev)
5995 struct drm_i915_private *dev_priv = dev->dev_private;
5999 for (i = 0; i < I915_MAX_PORTS; i++) {
6000 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6001 if (!intel_dig_port)
6004 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6005 if (!intel_dig_port->dp.can_mst)
6007 if (intel_dig_port->dp.is_mst)
6008 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6013 void intel_dp_mst_resume(struct drm_device *dev)
6015 struct drm_i915_private *dev_priv = dev->dev_private;
6018 for (i = 0; i < I915_MAX_PORTS; i++) {
6019 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6020 if (!intel_dig_port)
6022 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6025 if (!intel_dig_port->dp.can_mst)
6028 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6030 intel_dp_check_mst_status(&intel_dig_port->dp);