2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
49 static const struct dp_link_dpll gen4_dpll[] = {
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
56 static const struct dp_link_dpll pch_dpll[] = {
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
63 static const struct dp_link_dpll vlv_dpll[] = {
65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
74 static const struct dp_link_dpll chv_dpll[] = {
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
87 /* Skylake supports following rates */
88 static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
93 static const int default_rates[] = { 162000, 270000, 540000 };
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
102 static bool is_edp(struct intel_dp *intel_dp)
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 return intel_dig_port->base.base.dev;
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
121 static void intel_dp_link_down(struct intel_dp *intel_dp);
122 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
123 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
124 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125 static void vlv_steal_power_sequencer(struct drm_device *dev,
129 intel_dp_max_link_bw(struct intel_dp *intel_dp)
131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141 max_link_bw = DP_LINK_BW_1_62;
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160 return min(source_max, sink_max);
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169 * 270000 * 1 * 8 / 10 == 216000
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
181 intel_dp_link_required(int pixel_clock, int bpp)
183 return (pixel_clock * bpp + 9) / 10;
187 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
189 return (max_link_clock * max_lanes * 8) / 10;
192 static enum drm_mode_status
193 intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
196 struct intel_dp *intel_dp = intel_attached_dp(connector);
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
206 if (mode->vdisplay > fixed_mode->vdisplay)
209 target_clock = fixed_mode->clock;
212 max_link_clock = intel_dp_max_link_rate(intel_dp);
213 max_lanes = intel_dp_max_lane_count(intel_dp);
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
218 if (mode_rate > max_rate)
219 return MODE_CLOCK_HIGH;
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
230 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
242 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
251 /* hrawclock is 1/4 the FSB frequency */
253 intel_hrawclk(struct drm_device *dev)
255 struct drm_i915_private *dev_priv = dev->dev_private;
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_1067:
274 case CLKCFG_FSB_1333:
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
286 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
287 struct intel_dp *intel_dp);
289 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp);
292 static void pps_lock(struct intel_dp *intel_dp)
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
301 * See vlv_power_sequencer_reset() why we need
302 * a power domain reference here.
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
307 mutex_lock(&dev_priv->pps_mutex);
310 static void pps_unlock(struct intel_dp *intel_dp)
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
318 mutex_unlock(&dev_priv->pps_mutex);
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
358 * The DPLL for the pipe must be enabled for this to work.
359 * So enable temporarily it if it's not already enabled.
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368 * to make this power seqeuencer lock onto the port.
369 * Otherwise even VDD force bit won't work.
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
381 vlv_force_pll_off(dev, pipe);
385 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
394 lockdep_assert_held(&dev_priv->pps_mutex);
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
403 * We don't have power sequencer currently.
404 * Pick one that's not used by other ports.
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
408 struct intel_dp *tmp;
410 if (encoder->type != INTEL_OUTPUT_EDP)
413 tmp = enc_to_intel_dp(&encoder->base);
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
423 if (WARN_ON(pipes == 0))
426 pipe = ffs(pipes) - 1;
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
435 /* init power sequencer on this pipe and port */
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
443 vlv_power_sequencer_kick(intel_dp);
445 return intel_dp->pps_pipe;
448 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
451 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
457 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
463 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
472 vlv_pipe_check pipe_check)
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
483 if (!pipe_check(dev_priv, pipe))
493 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 enum port port = intel_dig_port->port;
500 lockdep_assert_held(&dev_priv->pps_mutex);
502 /* try to find a pipe with this port selected */
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
529 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
550 if (encoder->type != INTEL_OUTPUT_EDP)
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
578 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
579 This function only applicable when panel PM state is not to be tracked */
580 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
588 u32 pp_ctrl_reg, pp_div_reg;
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
595 if (IS_VALLEYVIEW(dev)) {
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
609 pps_unlock(intel_dp);
614 static bool edp_have_panel_power(struct intel_dp *intel_dp)
616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
617 struct drm_i915_private *dev_priv = dev->dev_private;
619 lockdep_assert_held(&dev_priv->pps_mutex);
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
628 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
631 struct drm_i915_private *dev_priv = dev->dev_private;
633 lockdep_assert_held(&dev_priv->pps_mutex);
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 intel_dp_check_edp(struct intel_dp *intel_dp)
645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
646 struct drm_i915_private *dev_priv = dev->dev_private;
648 if (!is_edp(intel_dp))
651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
660 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
669 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
672 msecs_to_jiffies_timeout(10));
674 done = wait_for_atomic(C, 10) == 0;
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
689 * The clock divider is based off the hrawclk, and would like to run at
690 * 2MHz. So, take the hrawclk value and divide by 2 and use that
692 return index ? 0 : intel_hrawclk(dev) / 2;
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
707 return 225; /* eDP input clock at 450Mhz */
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
719 if (intel_dig_port->port == PORT_A) {
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
735 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
737 return index ? 0 : 100;
740 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
745 * get_aux_clock_divider vfunc to plug-in into the existing code.
747 return index ? 0 : 1;
750 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
753 uint32_t aux_clock_divider)
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
769 return DP_AUX_CH_CTL_SEND_BUSY |
771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
774 DP_AUX_CH_CTL_RECEIVE_ERROR |
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
780 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 return DP_AUX_CH_CTL_SEND_BUSY |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
796 intel_dp_aux_ch(struct intel_dp *intel_dp,
797 const uint8_t *send, int send_bytes,
798 uint8_t *recv, int recv_size)
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
805 uint32_t aux_clock_divider;
806 int i, ret, recv_bytes;
809 bool has_aux_irq = HAS_AUX_IRQ(dev);
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
817 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
820 vdd = edp_panel_vdd_on(intel_dp);
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
828 intel_dp_check_edp(intel_dp);
830 intel_aux_display_runtime_get(dev_priv);
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
834 status = I915_READ_NOTRACE(ch_ctl);
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
864 intel_dp_pack_aux(send + i,
867 /* Send the command and wait for it to complete */
868 I915_WRITE(ch_ctl, send_ctl);
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
872 /* Clear done status and any errors */
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
882 if (status & DP_AUX_CH_CTL_DONE)
885 if (status & DP_AUX_CH_CTL_DONE)
889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
918 for (i = 0; i < recv_bytes; i += 4)
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
925 intel_aux_display_runtime_put(dev_priv);
928 edp_panel_vdd_off(intel_dp, false);
930 pps_unlock(intel_dp);
935 #define BARE_ADDRESS_SIZE 3
936 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
938 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
945 txbuf[0] = msg->request << 4;
946 txbuf[1] = msg->address >> 8;
947 txbuf[2] = msg->address & 0xff;
948 txbuf[3] = msg->size - 1;
950 switch (msg->request & ~DP_AUX_I2C_MOT) {
951 case DP_AUX_NATIVE_WRITE:
952 case DP_AUX_I2C_WRITE:
953 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
956 if (WARN_ON(txsize > 20))
959 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
961 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
963 msg->reply = rxbuf[0] >> 4;
965 /* Return payload size. */
970 case DP_AUX_NATIVE_READ:
971 case DP_AUX_I2C_READ:
972 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
973 rxsize = msg->size + 1;
975 if (WARN_ON(rxsize > 20))
978 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
980 msg->reply = rxbuf[0] >> 4;
982 * Assume happy day, and copy the data. The caller is
983 * expected to check msg->reply before touching it.
985 * Return payload size.
988 memcpy(msg->buffer, rxbuf + 1, ret);
1001 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1003 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1004 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1005 enum port port = intel_dig_port->port;
1006 const char *name = NULL;
1011 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1015 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1019 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1023 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1031 * The AUX_CTL register is usually DP_CTL + 0x10.
1033 * On Haswell and Broadwell though:
1034 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1035 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1037 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1039 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1040 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1042 intel_dp->aux.name = name;
1043 intel_dp->aux.dev = dev->dev;
1044 intel_dp->aux.transfer = intel_dp_aux_transfer;
1046 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1047 connector->base.kdev->kobj.name);
1049 ret = drm_dp_aux_register(&intel_dp->aux);
1051 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1056 ret = sysfs_create_link(&connector->base.kdev->kobj,
1057 &intel_dp->aux.ddc.dev.kobj,
1058 intel_dp->aux.ddc.dev.kobj.name);
1060 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1061 drm_dp_aux_unregister(&intel_dp->aux);
1066 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1068 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1070 if (!intel_connector->mst_port)
1071 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1072 intel_dp->aux.ddc.dev.kobj.name);
1073 intel_connector_unregister(intel_connector);
1077 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1081 pipe_config->ddi_pll_sel = SKL_DPLL0;
1082 pipe_config->dpll_hw_state.cfgcr1 = 0;
1083 pipe_config->dpll_hw_state.cfgcr2 = 0;
1085 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1086 switch (link_clock / 2) {
1088 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1103 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1104 results in CDCLK change. Need to handle the change of CDCLK by
1105 disabling pipes and re-enabling them */
1107 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1111 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1116 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1120 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1123 case DP_LINK_BW_1_62:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1126 case DP_LINK_BW_2_7:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1129 case DP_LINK_BW_5_4:
1130 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1136 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1138 if (intel_dp->num_supported_rates) {
1139 *sink_rates = intel_dp->supported_rates;
1140 return intel_dp->num_supported_rates;
1143 *sink_rates = default_rates;
1145 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1149 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1151 if (INTEL_INFO(dev)->gen >= 9) {
1152 *source_rates = gen9_rates;
1153 return ARRAY_SIZE(gen9_rates);
1154 } else if (IS_CHERRYVIEW(dev)) {
1155 *source_rates = chv_rates;
1156 return ARRAY_SIZE(chv_rates);
1159 *source_rates = default_rates;
1161 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1162 /* WaDisableHBR2:skl */
1163 return (DP_LINK_BW_2_7 >> 3) + 1;
1164 else if (INTEL_INFO(dev)->gen >= 8 ||
1165 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1166 return (DP_LINK_BW_5_4 >> 3) + 1;
1168 return (DP_LINK_BW_2_7 >> 3) + 1;
1172 intel_dp_set_clock(struct intel_encoder *encoder,
1173 struct intel_crtc_state *pipe_config, int link_bw)
1175 struct drm_device *dev = encoder->base.dev;
1176 const struct dp_link_dpll *divisor = NULL;
1180 divisor = gen4_dpll;
1181 count = ARRAY_SIZE(gen4_dpll);
1182 } else if (HAS_PCH_SPLIT(dev)) {
1184 count = ARRAY_SIZE(pch_dpll);
1185 } else if (IS_CHERRYVIEW(dev)) {
1187 count = ARRAY_SIZE(chv_dpll);
1188 } else if (IS_VALLEYVIEW(dev)) {
1190 count = ARRAY_SIZE(vlv_dpll);
1193 if (divisor && count) {
1194 for (i = 0; i < count; i++) {
1195 if (link_bw == divisor[i].link_bw) {
1196 pipe_config->dpll = divisor[i].dpll;
1197 pipe_config->clock_set = true;
1204 static int intersect_rates(const int *source_rates, int source_len,
1205 const int *sink_rates, int sink_len,
1206 int *supported_rates)
1208 int i = 0, j = 0, k = 0;
1210 while (i < source_len && j < sink_len) {
1211 if (source_rates[i] == sink_rates[j]) {
1212 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1214 supported_rates[k] = source_rates[i];
1218 } else if (source_rates[i] < sink_rates[j]) {
1227 static int intel_supported_rates(struct intel_dp *intel_dp,
1228 int *supported_rates)
1230 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1231 const int *source_rates, *sink_rates;
1232 int source_len, sink_len;
1234 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1235 source_len = intel_dp_source_rates(dev, &source_rates);
1237 return intersect_rates(source_rates, source_len,
1238 sink_rates, sink_len,
1242 static int rate_to_index(int find, const int *rates)
1246 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1247 if (find == rates[i])
1254 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1256 int rates[DP_MAX_SUPPORTED_RATES] = {};
1259 len = intel_supported_rates(intel_dp, rates);
1260 if (WARN_ON(len <= 0))
1263 return rates[rate_to_index(0, rates) - 1];
1266 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1268 return rate_to_index(rate, intel_dp->supported_rates);
1272 intel_dp_compute_config(struct intel_encoder *encoder,
1273 struct intel_crtc_state *pipe_config)
1275 struct drm_device *dev = encoder->base.dev;
1276 struct drm_i915_private *dev_priv = dev->dev_private;
1277 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1278 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1279 enum port port = dp_to_dig_port(intel_dp)->port;
1280 struct intel_crtc *intel_crtc = encoder->new_crtc;
1281 struct intel_connector *intel_connector = intel_dp->attached_connector;
1282 int lane_count, clock;
1283 int min_lane_count = 1;
1284 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1285 /* Conveniently, the link BW constants become indices with a shift...*/
1289 int link_avail, link_clock;
1290 int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
1293 supported_len = intel_supported_rates(intel_dp, supported_rates);
1295 /* No common link rates between source and sink */
1296 WARN_ON(supported_len <= 0);
1298 max_clock = supported_len - 1;
1300 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1301 pipe_config->has_pch_encoder = true;
1303 pipe_config->has_dp_encoder = true;
1304 pipe_config->has_drrs = false;
1305 pipe_config->has_audio = intel_dp->has_audio;
1307 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1308 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1310 if (!HAS_PCH_SPLIT(dev))
1311 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1312 intel_connector->panel.fitting_mode);
1314 intel_pch_panel_fitting(intel_crtc, pipe_config,
1315 intel_connector->panel.fitting_mode);
1318 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1321 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1322 "max bw %d pixel clock %iKHz\n",
1323 max_lane_count, supported_rates[max_clock],
1324 adjusted_mode->crtc_clock);
1326 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1327 * bpc in between. */
1328 bpp = pipe_config->pipe_bpp;
1329 if (is_edp(intel_dp)) {
1330 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1331 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1332 dev_priv->vbt.edp_bpp);
1333 bpp = dev_priv->vbt.edp_bpp;
1337 * Use the maximum clock and number of lanes the eDP panel
1338 * advertizes being capable of. The panels are generally
1339 * designed to support only a single clock and lane
1340 * configuration, and typically these values correspond to the
1341 * native resolution of the panel.
1343 min_lane_count = max_lane_count;
1344 min_clock = max_clock;
1347 for (; bpp >= 6*3; bpp -= 2*3) {
1348 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1351 for (clock = min_clock; clock <= max_clock; clock++) {
1352 for (lane_count = min_lane_count;
1353 lane_count <= max_lane_count;
1356 link_clock = supported_rates[clock];
1357 link_avail = intel_dp_max_data_rate(link_clock,
1360 if (mode_rate <= link_avail) {
1370 if (intel_dp->color_range_auto) {
1373 * CEA-861-E - 5.1 Default Encoding Parameters
1374 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1376 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1377 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1379 intel_dp->color_range = 0;
1382 if (intel_dp->color_range)
1383 pipe_config->limited_color_range = true;
1385 intel_dp->lane_count = lane_count;
1387 if (intel_dp->num_supported_rates) {
1388 intel_dp->link_bw = 0;
1389 intel_dp->rate_select =
1390 intel_dp_rate_select(intel_dp, supported_rates[clock]);
1393 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1394 intel_dp->rate_select = 0;
1397 pipe_config->pipe_bpp = bpp;
1398 pipe_config->port_clock = supported_rates[clock];
1400 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1401 intel_dp->link_bw, intel_dp->lane_count,
1402 pipe_config->port_clock, bpp);
1403 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1404 mode_rate, link_avail);
1406 intel_link_compute_m_n(bpp, lane_count,
1407 adjusted_mode->crtc_clock,
1408 pipe_config->port_clock,
1409 &pipe_config->dp_m_n);
1411 if (intel_connector->panel.downclock_mode != NULL &&
1412 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1413 pipe_config->has_drrs = true;
1414 intel_link_compute_m_n(bpp, lane_count,
1415 intel_connector->panel.downclock_mode->clock,
1416 pipe_config->port_clock,
1417 &pipe_config->dp_m2_n2);
1420 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1421 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
1422 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1423 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1425 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1430 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1432 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1433 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1434 struct drm_device *dev = crtc->base.dev;
1435 struct drm_i915_private *dev_priv = dev->dev_private;
1438 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1439 crtc->config->port_clock);
1440 dpa_ctl = I915_READ(DP_A);
1441 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1443 if (crtc->config->port_clock == 162000) {
1444 /* For a long time we've carried around a ILK-DevA w/a for the
1445 * 160MHz clock. If we're really unlucky, it's still required.
1447 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1448 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1449 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1451 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1452 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1455 I915_WRITE(DP_A, dpa_ctl);
1461 static void intel_dp_prepare(struct intel_encoder *encoder)
1463 struct drm_device *dev = encoder->base.dev;
1464 struct drm_i915_private *dev_priv = dev->dev_private;
1465 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1466 enum port port = dp_to_dig_port(intel_dp)->port;
1467 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1468 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1471 * There are four kinds of DP registers:
1478 * IBX PCH and CPU are the same for almost everything,
1479 * except that the CPU DP PLL is configured in this
1482 * CPT PCH is quite different, having many bits moved
1483 * to the TRANS_DP_CTL register instead. That
1484 * configuration happens (oddly) in ironlake_pch_enable
1487 /* Preserve the BIOS-computed detected bit. This is
1488 * supposed to be read-only.
1490 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1492 /* Handle DP bits in common between all three register formats */
1493 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1494 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1496 if (crtc->config->has_audio)
1497 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1499 /* Split out the IBX/CPU vs CPT settings */
1501 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1502 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1503 intel_dp->DP |= DP_SYNC_HS_HIGH;
1504 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1505 intel_dp->DP |= DP_SYNC_VS_HIGH;
1506 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1508 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1509 intel_dp->DP |= DP_ENHANCED_FRAMING;
1511 intel_dp->DP |= crtc->pipe << 29;
1512 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1513 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1514 intel_dp->DP |= intel_dp->color_range;
1516 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1517 intel_dp->DP |= DP_SYNC_HS_HIGH;
1518 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1519 intel_dp->DP |= DP_SYNC_VS_HIGH;
1520 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1522 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1523 intel_dp->DP |= DP_ENHANCED_FRAMING;
1525 if (!IS_CHERRYVIEW(dev)) {
1526 if (crtc->pipe == 1)
1527 intel_dp->DP |= DP_PIPEB_SELECT;
1529 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1532 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1536 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1537 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1539 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1540 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1542 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1543 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1545 static void wait_panel_status(struct intel_dp *intel_dp,
1549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1550 struct drm_i915_private *dev_priv = dev->dev_private;
1551 u32 pp_stat_reg, pp_ctrl_reg;
1553 lockdep_assert_held(&dev_priv->pps_mutex);
1555 pp_stat_reg = _pp_stat_reg(intel_dp);
1556 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1558 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1560 I915_READ(pp_stat_reg),
1561 I915_READ(pp_ctrl_reg));
1563 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1564 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1565 I915_READ(pp_stat_reg),
1566 I915_READ(pp_ctrl_reg));
1569 DRM_DEBUG_KMS("Wait complete\n");
1572 static void wait_panel_on(struct intel_dp *intel_dp)
1574 DRM_DEBUG_KMS("Wait for panel power on\n");
1575 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1578 static void wait_panel_off(struct intel_dp *intel_dp)
1580 DRM_DEBUG_KMS("Wait for panel power off time\n");
1581 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1584 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1586 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1588 /* When we disable the VDD override bit last we have to do the manual
1590 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1591 intel_dp->panel_power_cycle_delay);
1593 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1596 static void wait_backlight_on(struct intel_dp *intel_dp)
1598 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1599 intel_dp->backlight_on_delay);
1602 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1604 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1605 intel_dp->backlight_off_delay);
1608 /* Read the current pp_control value, unlocking the register if it
1612 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1614 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1615 struct drm_i915_private *dev_priv = dev->dev_private;
1618 lockdep_assert_held(&dev_priv->pps_mutex);
1620 control = I915_READ(_pp_ctrl_reg(intel_dp));
1621 control &= ~PANEL_UNLOCK_MASK;
1622 control |= PANEL_UNLOCK_REGS;
1627 * Must be paired with edp_panel_vdd_off().
1628 * Must hold pps_mutex around the whole on/off sequence.
1629 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1631 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1634 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1635 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1636 struct drm_i915_private *dev_priv = dev->dev_private;
1637 enum intel_display_power_domain power_domain;
1639 u32 pp_stat_reg, pp_ctrl_reg;
1640 bool need_to_disable = !intel_dp->want_panel_vdd;
1642 lockdep_assert_held(&dev_priv->pps_mutex);
1644 if (!is_edp(intel_dp))
1647 cancel_delayed_work(&intel_dp->panel_vdd_work);
1648 intel_dp->want_panel_vdd = true;
1650 if (edp_have_panel_vdd(intel_dp))
1651 return need_to_disable;
1653 power_domain = intel_display_port_power_domain(intel_encoder);
1654 intel_display_power_get(dev_priv, power_domain);
1656 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1657 port_name(intel_dig_port->port));
1659 if (!edp_have_panel_power(intel_dp))
1660 wait_panel_power_cycle(intel_dp);
1662 pp = ironlake_get_pp_control(intel_dp);
1663 pp |= EDP_FORCE_VDD;
1665 pp_stat_reg = _pp_stat_reg(intel_dp);
1666 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1668 I915_WRITE(pp_ctrl_reg, pp);
1669 POSTING_READ(pp_ctrl_reg);
1670 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1671 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1673 * If the panel wasn't on, delay before accessing aux channel
1675 if (!edp_have_panel_power(intel_dp)) {
1676 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1677 port_name(intel_dig_port->port));
1678 msleep(intel_dp->panel_power_up_delay);
1681 return need_to_disable;
1685 * Must be paired with intel_edp_panel_vdd_off() or
1686 * intel_edp_panel_off().
1687 * Nested calls to these functions are not allowed since
1688 * we drop the lock. Caller must use some higher level
1689 * locking to prevent nested calls from other threads.
1691 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1695 if (!is_edp(intel_dp))
1699 vdd = edp_panel_vdd_on(intel_dp);
1700 pps_unlock(intel_dp);
1702 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1703 port_name(dp_to_dig_port(intel_dp)->port));
1706 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1708 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1709 struct drm_i915_private *dev_priv = dev->dev_private;
1710 struct intel_digital_port *intel_dig_port =
1711 dp_to_dig_port(intel_dp);
1712 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1713 enum intel_display_power_domain power_domain;
1715 u32 pp_stat_reg, pp_ctrl_reg;
1717 lockdep_assert_held(&dev_priv->pps_mutex);
1719 WARN_ON(intel_dp->want_panel_vdd);
1721 if (!edp_have_panel_vdd(intel_dp))
1724 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1725 port_name(intel_dig_port->port));
1727 pp = ironlake_get_pp_control(intel_dp);
1728 pp &= ~EDP_FORCE_VDD;
1730 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1731 pp_stat_reg = _pp_stat_reg(intel_dp);
1733 I915_WRITE(pp_ctrl_reg, pp);
1734 POSTING_READ(pp_ctrl_reg);
1736 /* Make sure sequencer is idle before allowing subsequent activity */
1737 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1738 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1740 if ((pp & POWER_TARGET_ON) == 0)
1741 intel_dp->last_power_cycle = jiffies;
1743 power_domain = intel_display_port_power_domain(intel_encoder);
1744 intel_display_power_put(dev_priv, power_domain);
1747 static void edp_panel_vdd_work(struct work_struct *__work)
1749 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1750 struct intel_dp, panel_vdd_work);
1753 if (!intel_dp->want_panel_vdd)
1754 edp_panel_vdd_off_sync(intel_dp);
1755 pps_unlock(intel_dp);
1758 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1760 unsigned long delay;
1763 * Queue the timer to fire a long time from now (relative to the power
1764 * down delay) to keep the panel power up across a sequence of
1767 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1768 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1772 * Must be paired with edp_panel_vdd_on().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1776 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1778 struct drm_i915_private *dev_priv =
1779 intel_dp_to_dev(intel_dp)->dev_private;
1781 lockdep_assert_held(&dev_priv->pps_mutex);
1783 if (!is_edp(intel_dp))
1786 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1787 port_name(dp_to_dig_port(intel_dp)->port));
1789 intel_dp->want_panel_vdd = false;
1792 edp_panel_vdd_off_sync(intel_dp);
1794 edp_panel_vdd_schedule_off(intel_dp);
1797 static void edp_panel_on(struct intel_dp *intel_dp)
1799 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1800 struct drm_i915_private *dev_priv = dev->dev_private;
1804 lockdep_assert_held(&dev_priv->pps_mutex);
1806 if (!is_edp(intel_dp))
1809 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1810 port_name(dp_to_dig_port(intel_dp)->port));
1812 if (WARN(edp_have_panel_power(intel_dp),
1813 "eDP port %c panel power already on\n",
1814 port_name(dp_to_dig_port(intel_dp)->port)))
1817 wait_panel_power_cycle(intel_dp);
1819 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1820 pp = ironlake_get_pp_control(intel_dp);
1822 /* ILK workaround: disable reset around power sequence */
1823 pp &= ~PANEL_POWER_RESET;
1824 I915_WRITE(pp_ctrl_reg, pp);
1825 POSTING_READ(pp_ctrl_reg);
1828 pp |= POWER_TARGET_ON;
1830 pp |= PANEL_POWER_RESET;
1832 I915_WRITE(pp_ctrl_reg, pp);
1833 POSTING_READ(pp_ctrl_reg);
1835 wait_panel_on(intel_dp);
1836 intel_dp->last_power_on = jiffies;
1839 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1840 I915_WRITE(pp_ctrl_reg, pp);
1841 POSTING_READ(pp_ctrl_reg);
1845 void intel_edp_panel_on(struct intel_dp *intel_dp)
1847 if (!is_edp(intel_dp))
1851 edp_panel_on(intel_dp);
1852 pps_unlock(intel_dp);
1856 static void edp_panel_off(struct intel_dp *intel_dp)
1858 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1859 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1860 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1861 struct drm_i915_private *dev_priv = dev->dev_private;
1862 enum intel_display_power_domain power_domain;
1866 lockdep_assert_held(&dev_priv->pps_mutex);
1868 if (!is_edp(intel_dp))
1871 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1872 port_name(dp_to_dig_port(intel_dp)->port));
1874 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1875 port_name(dp_to_dig_port(intel_dp)->port));
1877 pp = ironlake_get_pp_control(intel_dp);
1878 /* We need to switch off panel power _and_ force vdd, for otherwise some
1879 * panels get very unhappy and cease to work. */
1880 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1883 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1885 intel_dp->want_panel_vdd = false;
1887 I915_WRITE(pp_ctrl_reg, pp);
1888 POSTING_READ(pp_ctrl_reg);
1890 intel_dp->last_power_cycle = jiffies;
1891 wait_panel_off(intel_dp);
1893 /* We got a reference when we enabled the VDD. */
1894 power_domain = intel_display_port_power_domain(intel_encoder);
1895 intel_display_power_put(dev_priv, power_domain);
1898 void intel_edp_panel_off(struct intel_dp *intel_dp)
1900 if (!is_edp(intel_dp))
1904 edp_panel_off(intel_dp);
1905 pps_unlock(intel_dp);
1908 /* Enable backlight in the panel power control. */
1909 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1911 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1912 struct drm_device *dev = intel_dig_port->base.base.dev;
1913 struct drm_i915_private *dev_priv = dev->dev_private;
1918 * If we enable the backlight right away following a panel power
1919 * on, we may see slight flicker as the panel syncs with the eDP
1920 * link. So delay a bit to make sure the image is solid before
1921 * allowing it to appear.
1923 wait_backlight_on(intel_dp);
1927 pp = ironlake_get_pp_control(intel_dp);
1928 pp |= EDP_BLC_ENABLE;
1930 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1932 I915_WRITE(pp_ctrl_reg, pp);
1933 POSTING_READ(pp_ctrl_reg);
1935 pps_unlock(intel_dp);
1938 /* Enable backlight PWM and backlight PP control. */
1939 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1941 if (!is_edp(intel_dp))
1944 DRM_DEBUG_KMS("\n");
1946 intel_panel_enable_backlight(intel_dp->attached_connector);
1947 _intel_edp_backlight_on(intel_dp);
1950 /* Disable backlight in the panel power control. */
1951 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1953 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1954 struct drm_i915_private *dev_priv = dev->dev_private;
1958 if (!is_edp(intel_dp))
1963 pp = ironlake_get_pp_control(intel_dp);
1964 pp &= ~EDP_BLC_ENABLE;
1966 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1968 I915_WRITE(pp_ctrl_reg, pp);
1969 POSTING_READ(pp_ctrl_reg);
1971 pps_unlock(intel_dp);
1973 intel_dp->last_backlight_off = jiffies;
1974 edp_wait_backlight_off(intel_dp);
1977 /* Disable backlight PP control and backlight PWM. */
1978 void intel_edp_backlight_off(struct intel_dp *intel_dp)
1980 if (!is_edp(intel_dp))
1983 DRM_DEBUG_KMS("\n");
1985 _intel_edp_backlight_off(intel_dp);
1986 intel_panel_disable_backlight(intel_dp->attached_connector);
1990 * Hook for controlling the panel power control backlight through the bl_power
1991 * sysfs attribute. Take care to handle multiple calls.
1993 static void intel_edp_backlight_power(struct intel_connector *connector,
1996 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2000 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2001 pps_unlock(intel_dp);
2003 if (is_enabled == enable)
2006 DRM_DEBUG_KMS("panel power control backlight %s\n",
2007 enable ? "enable" : "disable");
2010 _intel_edp_backlight_on(intel_dp);
2012 _intel_edp_backlight_off(intel_dp);
2015 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2017 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2018 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2019 struct drm_device *dev = crtc->dev;
2020 struct drm_i915_private *dev_priv = dev->dev_private;
2023 assert_pipe_disabled(dev_priv,
2024 to_intel_crtc(crtc)->pipe);
2026 DRM_DEBUG_KMS("\n");
2027 dpa_ctl = I915_READ(DP_A);
2028 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2029 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2031 /* We don't adjust intel_dp->DP while tearing down the link, to
2032 * facilitate link retraining (e.g. after hotplug). Hence clear all
2033 * enable bits here to ensure that we don't enable too much. */
2034 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2035 intel_dp->DP |= DP_PLL_ENABLE;
2036 I915_WRITE(DP_A, intel_dp->DP);
2041 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2043 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2044 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2045 struct drm_device *dev = crtc->dev;
2046 struct drm_i915_private *dev_priv = dev->dev_private;
2049 assert_pipe_disabled(dev_priv,
2050 to_intel_crtc(crtc)->pipe);
2052 dpa_ctl = I915_READ(DP_A);
2053 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2054 "dp pll off, should be on\n");
2055 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2057 /* We can't rely on the value tracked for the DP register in
2058 * intel_dp->DP because link_down must not change that (otherwise link
2059 * re-training will fail. */
2060 dpa_ctl &= ~DP_PLL_ENABLE;
2061 I915_WRITE(DP_A, dpa_ctl);
2066 /* If the sink supports it, try to set the power state appropriately */
2067 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2071 /* Should have a valid DPCD by this point */
2072 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2075 if (mode != DRM_MODE_DPMS_ON) {
2076 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2080 * When turning on, we need to retry for 1ms to give the sink
2083 for (i = 0; i < 3; i++) {
2084 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2093 DRM_DEBUG_KMS("failed to %s sink power state\n",
2094 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2097 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2100 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2101 enum port port = dp_to_dig_port(intel_dp)->port;
2102 struct drm_device *dev = encoder->base.dev;
2103 struct drm_i915_private *dev_priv = dev->dev_private;
2104 enum intel_display_power_domain power_domain;
2107 power_domain = intel_display_port_power_domain(encoder);
2108 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2111 tmp = I915_READ(intel_dp->output_reg);
2113 if (!(tmp & DP_PORT_EN))
2116 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2117 *pipe = PORT_TO_PIPE_CPT(tmp);
2118 } else if (IS_CHERRYVIEW(dev)) {
2119 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2120 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2121 *pipe = PORT_TO_PIPE(tmp);
2127 switch (intel_dp->output_reg) {
2129 trans_sel = TRANS_DP_PORT_SEL_B;
2132 trans_sel = TRANS_DP_PORT_SEL_C;
2135 trans_sel = TRANS_DP_PORT_SEL_D;
2141 for_each_pipe(dev_priv, i) {
2142 trans_dp = I915_READ(TRANS_DP_CTL(i));
2143 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2149 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2150 intel_dp->output_reg);
2156 static void intel_dp_get_config(struct intel_encoder *encoder,
2157 struct intel_crtc_state *pipe_config)
2159 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2161 struct drm_device *dev = encoder->base.dev;
2162 struct drm_i915_private *dev_priv = dev->dev_private;
2163 enum port port = dp_to_dig_port(intel_dp)->port;
2164 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2167 tmp = I915_READ(intel_dp->output_reg);
2168 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2169 pipe_config->has_audio = true;
2171 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2172 if (tmp & DP_SYNC_HS_HIGH)
2173 flags |= DRM_MODE_FLAG_PHSYNC;
2175 flags |= DRM_MODE_FLAG_NHSYNC;
2177 if (tmp & DP_SYNC_VS_HIGH)
2178 flags |= DRM_MODE_FLAG_PVSYNC;
2180 flags |= DRM_MODE_FLAG_NVSYNC;
2182 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2183 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2184 flags |= DRM_MODE_FLAG_PHSYNC;
2186 flags |= DRM_MODE_FLAG_NHSYNC;
2188 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2189 flags |= DRM_MODE_FLAG_PVSYNC;
2191 flags |= DRM_MODE_FLAG_NVSYNC;
2194 pipe_config->base.adjusted_mode.flags |= flags;
2196 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2197 tmp & DP_COLOR_RANGE_16_235)
2198 pipe_config->limited_color_range = true;
2200 pipe_config->has_dp_encoder = true;
2202 intel_dp_get_m_n(crtc, pipe_config);
2204 if (port == PORT_A) {
2205 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2206 pipe_config->port_clock = 162000;
2208 pipe_config->port_clock = 270000;
2211 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2212 &pipe_config->dp_m_n);
2214 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2215 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2217 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2219 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2220 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2222 * This is a big fat ugly hack.
2224 * Some machines in UEFI boot mode provide us a VBT that has 18
2225 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2226 * unknown we fail to light up. Yet the same BIOS boots up with
2227 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2228 * max, not what it tells us to use.
2230 * Note: This will still be broken if the eDP panel is not lit
2231 * up by the BIOS, and thus we can't get the mode at module
2234 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2235 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2236 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2240 static void intel_disable_dp(struct intel_encoder *encoder)
2242 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2243 struct drm_device *dev = encoder->base.dev;
2244 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2246 if (crtc->config->has_audio)
2247 intel_audio_codec_disable(encoder);
2249 if (HAS_PSR(dev) && !HAS_DDI(dev))
2250 intel_psr_disable(intel_dp);
2252 /* Make sure the panel is off before trying to change the mode. But also
2253 * ensure that we have vdd while we switch off the panel. */
2254 intel_edp_panel_vdd_on(intel_dp);
2255 intel_edp_backlight_off(intel_dp);
2256 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2257 intel_edp_panel_off(intel_dp);
2259 /* disable the port before the pipe on g4x */
2260 if (INTEL_INFO(dev)->gen < 5)
2261 intel_dp_link_down(intel_dp);
2264 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2266 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2267 enum port port = dp_to_dig_port(intel_dp)->port;
2269 intel_dp_link_down(intel_dp);
2271 ironlake_edp_pll_off(intel_dp);
2274 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2276 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2278 intel_dp_link_down(intel_dp);
2281 static void chv_post_disable_dp(struct intel_encoder *encoder)
2283 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2284 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2285 struct drm_device *dev = encoder->base.dev;
2286 struct drm_i915_private *dev_priv = dev->dev_private;
2287 struct intel_crtc *intel_crtc =
2288 to_intel_crtc(encoder->base.crtc);
2289 enum dpio_channel ch = vlv_dport_to_channel(dport);
2290 enum pipe pipe = intel_crtc->pipe;
2293 intel_dp_link_down(intel_dp);
2295 mutex_lock(&dev_priv->dpio_lock);
2297 /* Propagate soft reset to data lane reset */
2298 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2299 val |= CHV_PCS_REQ_SOFTRESET_EN;
2300 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2303 val |= CHV_PCS_REQ_SOFTRESET_EN;
2304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2306 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2307 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2308 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2310 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2311 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2312 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2314 mutex_unlock(&dev_priv->dpio_lock);
2318 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2320 uint8_t dp_train_pat)
2322 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2323 struct drm_device *dev = intel_dig_port->base.base.dev;
2324 struct drm_i915_private *dev_priv = dev->dev_private;
2325 enum port port = intel_dig_port->port;
2328 uint32_t temp = I915_READ(DP_TP_CTL(port));
2330 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2331 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2333 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2335 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2336 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2337 case DP_TRAINING_PATTERN_DISABLE:
2338 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2341 case DP_TRAINING_PATTERN_1:
2342 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2344 case DP_TRAINING_PATTERN_2:
2345 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2347 case DP_TRAINING_PATTERN_3:
2348 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2351 I915_WRITE(DP_TP_CTL(port), temp);
2353 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2354 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2356 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2357 case DP_TRAINING_PATTERN_DISABLE:
2358 *DP |= DP_LINK_TRAIN_OFF_CPT;
2360 case DP_TRAINING_PATTERN_1:
2361 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2363 case DP_TRAINING_PATTERN_2:
2364 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2366 case DP_TRAINING_PATTERN_3:
2367 DRM_ERROR("DP training pattern 3 not supported\n");
2368 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2373 if (IS_CHERRYVIEW(dev))
2374 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2376 *DP &= ~DP_LINK_TRAIN_MASK;
2378 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2379 case DP_TRAINING_PATTERN_DISABLE:
2380 *DP |= DP_LINK_TRAIN_OFF;
2382 case DP_TRAINING_PATTERN_1:
2383 *DP |= DP_LINK_TRAIN_PAT_1;
2385 case DP_TRAINING_PATTERN_2:
2386 *DP |= DP_LINK_TRAIN_PAT_2;
2388 case DP_TRAINING_PATTERN_3:
2389 if (IS_CHERRYVIEW(dev)) {
2390 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2392 DRM_ERROR("DP training pattern 3 not supported\n");
2393 *DP |= DP_LINK_TRAIN_PAT_2;
2400 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2402 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2403 struct drm_i915_private *dev_priv = dev->dev_private;
2405 /* enable with pattern 1 (as per spec) */
2406 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2407 DP_TRAINING_PATTERN_1);
2409 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2410 POSTING_READ(intel_dp->output_reg);
2413 * Magic for VLV/CHV. We _must_ first set up the register
2414 * without actually enabling the port, and then do another
2415 * write to enable the port. Otherwise link training will
2416 * fail when the power sequencer is freshly used for this port.
2418 intel_dp->DP |= DP_PORT_EN;
2420 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2421 POSTING_READ(intel_dp->output_reg);
2424 static void intel_enable_dp(struct intel_encoder *encoder)
2426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2427 struct drm_device *dev = encoder->base.dev;
2428 struct drm_i915_private *dev_priv = dev->dev_private;
2429 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2430 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2432 if (WARN_ON(dp_reg & DP_PORT_EN))
2437 if (IS_VALLEYVIEW(dev))
2438 vlv_init_panel_power_sequencer(intel_dp);
2440 intel_dp_enable_port(intel_dp);
2442 edp_panel_vdd_on(intel_dp);
2443 edp_panel_on(intel_dp);
2444 edp_panel_vdd_off(intel_dp, true);
2446 pps_unlock(intel_dp);
2448 if (IS_VALLEYVIEW(dev))
2449 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2451 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2452 intel_dp_start_link_train(intel_dp);
2453 intel_dp_complete_link_train(intel_dp);
2454 intel_dp_stop_link_train(intel_dp);
2456 if (crtc->config->has_audio) {
2457 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2458 pipe_name(crtc->pipe));
2459 intel_audio_codec_enable(encoder);
2463 static void g4x_enable_dp(struct intel_encoder *encoder)
2465 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2467 intel_enable_dp(encoder);
2468 intel_edp_backlight_on(intel_dp);
2471 static void vlv_enable_dp(struct intel_encoder *encoder)
2473 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2475 intel_edp_backlight_on(intel_dp);
2476 intel_psr_enable(intel_dp);
2479 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2481 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2482 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2484 intel_dp_prepare(encoder);
2486 /* Only ilk+ has port A */
2487 if (dport->port == PORT_A) {
2488 ironlake_set_pll_cpu_edp(intel_dp);
2489 ironlake_edp_pll_on(intel_dp);
2493 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2496 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2497 enum pipe pipe = intel_dp->pps_pipe;
2498 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2500 edp_panel_vdd_off_sync(intel_dp);
2503 * VLV seems to get confused when multiple power seqeuencers
2504 * have the same port selected (even if only one has power/vdd
2505 * enabled). The failure manifests as vlv_wait_port_ready() failing
2506 * CHV on the other hand doesn't seem to mind having the same port
2507 * selected in multiple power seqeuencers, but let's clear the
2508 * port select always when logically disconnecting a power sequencer
2511 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2512 pipe_name(pipe), port_name(intel_dig_port->port));
2513 I915_WRITE(pp_on_reg, 0);
2514 POSTING_READ(pp_on_reg);
2516 intel_dp->pps_pipe = INVALID_PIPE;
2519 static void vlv_steal_power_sequencer(struct drm_device *dev,
2522 struct drm_i915_private *dev_priv = dev->dev_private;
2523 struct intel_encoder *encoder;
2525 lockdep_assert_held(&dev_priv->pps_mutex);
2527 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2530 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2532 struct intel_dp *intel_dp;
2535 if (encoder->type != INTEL_OUTPUT_EDP)
2538 intel_dp = enc_to_intel_dp(&encoder->base);
2539 port = dp_to_dig_port(intel_dp)->port;
2541 if (intel_dp->pps_pipe != pipe)
2544 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2545 pipe_name(pipe), port_name(port));
2547 WARN(encoder->connectors_active,
2548 "stealing pipe %c power sequencer from active eDP port %c\n",
2549 pipe_name(pipe), port_name(port));
2551 /* make sure vdd is off before we steal it */
2552 vlv_detach_power_sequencer(intel_dp);
2556 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2558 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2559 struct intel_encoder *encoder = &intel_dig_port->base;
2560 struct drm_device *dev = encoder->base.dev;
2561 struct drm_i915_private *dev_priv = dev->dev_private;
2562 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2564 lockdep_assert_held(&dev_priv->pps_mutex);
2566 if (!is_edp(intel_dp))
2569 if (intel_dp->pps_pipe == crtc->pipe)
2573 * If another power sequencer was being used on this
2574 * port previously make sure to turn off vdd there while
2575 * we still have control of it.
2577 if (intel_dp->pps_pipe != INVALID_PIPE)
2578 vlv_detach_power_sequencer(intel_dp);
2581 * We may be stealing the power
2582 * sequencer from another port.
2584 vlv_steal_power_sequencer(dev, crtc->pipe);
2586 /* now it's all ours */
2587 intel_dp->pps_pipe = crtc->pipe;
2589 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2590 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2592 /* init power sequencer on this pipe and port */
2593 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2594 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2597 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2599 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2600 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2601 struct drm_device *dev = encoder->base.dev;
2602 struct drm_i915_private *dev_priv = dev->dev_private;
2603 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2604 enum dpio_channel port = vlv_dport_to_channel(dport);
2605 int pipe = intel_crtc->pipe;
2608 mutex_lock(&dev_priv->dpio_lock);
2610 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2617 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2618 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2619 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2621 mutex_unlock(&dev_priv->dpio_lock);
2623 intel_enable_dp(encoder);
2626 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2628 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2629 struct drm_device *dev = encoder->base.dev;
2630 struct drm_i915_private *dev_priv = dev->dev_private;
2631 struct intel_crtc *intel_crtc =
2632 to_intel_crtc(encoder->base.crtc);
2633 enum dpio_channel port = vlv_dport_to_channel(dport);
2634 int pipe = intel_crtc->pipe;
2636 intel_dp_prepare(encoder);
2638 /* Program Tx lane resets to default */
2639 mutex_lock(&dev_priv->dpio_lock);
2640 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2641 DPIO_PCS_TX_LANE2_RESET |
2642 DPIO_PCS_TX_LANE1_RESET);
2643 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2644 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2645 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2646 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2647 DPIO_PCS_CLK_SOFT_RESET);
2649 /* Fix up inter-pair skew failure */
2650 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2651 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2652 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2653 mutex_unlock(&dev_priv->dpio_lock);
2656 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2658 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2659 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2660 struct drm_device *dev = encoder->base.dev;
2661 struct drm_i915_private *dev_priv = dev->dev_private;
2662 struct intel_crtc *intel_crtc =
2663 to_intel_crtc(encoder->base.crtc);
2664 enum dpio_channel ch = vlv_dport_to_channel(dport);
2665 int pipe = intel_crtc->pipe;
2669 mutex_lock(&dev_priv->dpio_lock);
2671 /* allow hardware to manage TX FIFO reset source */
2672 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2673 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2674 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2676 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2677 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2678 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2680 /* Deassert soft data lane reset*/
2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2682 val |= CHV_PCS_REQ_SOFTRESET_EN;
2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2685 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2686 val |= CHV_PCS_REQ_SOFTRESET_EN;
2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2689 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2690 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2691 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2693 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2694 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2695 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2697 /* Program Tx lane latency optimal setting*/
2698 for (i = 0; i < 4; i++) {
2699 /* Set the latency optimal bit */
2700 data = (i == 1) ? 0x0 : 0x6;
2701 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2702 data << DPIO_FRC_LATENCY_SHFIT);
2704 /* Set the upar bit */
2705 data = (i == 1) ? 0x0 : 0x1;
2706 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2707 data << DPIO_UPAR_SHIFT);
2710 /* Data lane stagger programming */
2711 /* FIXME: Fix up value only after power analysis */
2713 mutex_unlock(&dev_priv->dpio_lock);
2715 intel_enable_dp(encoder);
2718 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2720 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2721 struct drm_device *dev = encoder->base.dev;
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 struct intel_crtc *intel_crtc =
2724 to_intel_crtc(encoder->base.crtc);
2725 enum dpio_channel ch = vlv_dport_to_channel(dport);
2726 enum pipe pipe = intel_crtc->pipe;
2729 intel_dp_prepare(encoder);
2731 mutex_lock(&dev_priv->dpio_lock);
2733 /* program left/right clock distribution */
2734 if (pipe != PIPE_B) {
2735 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2736 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2738 val |= CHV_BUFLEFTENA1_FORCE;
2740 val |= CHV_BUFRIGHTENA1_FORCE;
2741 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2743 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2744 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2746 val |= CHV_BUFLEFTENA2_FORCE;
2748 val |= CHV_BUFRIGHTENA2_FORCE;
2749 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2752 /* program clock channel usage */
2753 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2754 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2756 val &= ~CHV_PCS_USEDCLKCHANNEL;
2758 val |= CHV_PCS_USEDCLKCHANNEL;
2759 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2761 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2762 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2764 val &= ~CHV_PCS_USEDCLKCHANNEL;
2766 val |= CHV_PCS_USEDCLKCHANNEL;
2767 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2770 * This a a bit weird since generally CL
2771 * matches the pipe, but here we need to
2772 * pick the CL based on the port.
2774 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2776 val &= ~CHV_CMN_USEDCLKCHANNEL;
2778 val |= CHV_CMN_USEDCLKCHANNEL;
2779 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2781 mutex_unlock(&dev_priv->dpio_lock);
2785 * Native read with retry for link status and receiver capability reads for
2786 * cases where the sink may still be asleep.
2788 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2789 * supposed to retry 3 times per the spec.
2792 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2793 void *buffer, size_t size)
2799 * Sometime we just get the same incorrect byte repeated
2800 * over the entire buffer. Doing just one throw away read
2801 * initially seems to "solve" it.
2803 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2805 for (i = 0; i < 3; i++) {
2806 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2816 * Fetch AUX CH registers 0x202 - 0x207 which contain
2817 * link status information
2820 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2822 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2825 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2828 /* These are source-specific values. */
2830 intel_dp_voltage_max(struct intel_dp *intel_dp)
2832 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2833 struct drm_i915_private *dev_priv = dev->dev_private;
2834 enum port port = dp_to_dig_port(intel_dp)->port;
2836 if (INTEL_INFO(dev)->gen >= 9) {
2837 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2838 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2839 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2840 } else if (IS_VALLEYVIEW(dev))
2841 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2842 else if (IS_GEN7(dev) && port == PORT_A)
2843 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2844 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2845 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2847 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2851 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2853 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2854 enum port port = dp_to_dig_port(intel_dp)->port;
2856 if (INTEL_INFO(dev)->gen >= 9) {
2857 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2861 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2863 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2865 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2867 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2869 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2870 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2872 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2874 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2876 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2879 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2881 } else if (IS_VALLEYVIEW(dev)) {
2882 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2884 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2886 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2887 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2888 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2891 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2893 } else if (IS_GEN7(dev) && port == PORT_A) {
2894 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2896 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2898 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2899 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2901 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2904 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2907 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2909 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2910 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2918 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2920 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2921 struct drm_i915_private *dev_priv = dev->dev_private;
2922 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2923 struct intel_crtc *intel_crtc =
2924 to_intel_crtc(dport->base.base.crtc);
2925 unsigned long demph_reg_value, preemph_reg_value,
2926 uniqtranscale_reg_value;
2927 uint8_t train_set = intel_dp->train_set[0];
2928 enum dpio_channel port = vlv_dport_to_channel(dport);
2929 int pipe = intel_crtc->pipe;
2931 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2932 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2933 preemph_reg_value = 0x0004000;
2934 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2936 demph_reg_value = 0x2B405555;
2937 uniqtranscale_reg_value = 0x552AB83A;
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2940 demph_reg_value = 0x2B404040;
2941 uniqtranscale_reg_value = 0x5548B83A;
2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2944 demph_reg_value = 0x2B245555;
2945 uniqtranscale_reg_value = 0x5560B83A;
2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2948 demph_reg_value = 0x2B405555;
2949 uniqtranscale_reg_value = 0x5598DA3A;
2955 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2956 preemph_reg_value = 0x0002000;
2957 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2959 demph_reg_value = 0x2B404040;
2960 uniqtranscale_reg_value = 0x5552B83A;
2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2963 demph_reg_value = 0x2B404848;
2964 uniqtranscale_reg_value = 0x5580B83A;
2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2967 demph_reg_value = 0x2B404040;
2968 uniqtranscale_reg_value = 0x55ADDA3A;
2974 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2975 preemph_reg_value = 0x0000000;
2976 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2978 demph_reg_value = 0x2B305555;
2979 uniqtranscale_reg_value = 0x5570B83A;
2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2982 demph_reg_value = 0x2B2B4040;
2983 uniqtranscale_reg_value = 0x55ADDA3A;
2989 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2990 preemph_reg_value = 0x0006000;
2991 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2993 demph_reg_value = 0x1B405555;
2994 uniqtranscale_reg_value = 0x55ADDA3A;
3004 mutex_lock(&dev_priv->dpio_lock);
3005 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3006 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3007 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3008 uniqtranscale_reg_value);
3009 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3010 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3011 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3012 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3013 mutex_unlock(&dev_priv->dpio_lock);
3018 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3020 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3021 struct drm_i915_private *dev_priv = dev->dev_private;
3022 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3023 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3024 u32 deemph_reg_value, margin_reg_value, val;
3025 uint8_t train_set = intel_dp->train_set[0];
3026 enum dpio_channel ch = vlv_dport_to_channel(dport);
3027 enum pipe pipe = intel_crtc->pipe;
3030 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3031 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3034 deemph_reg_value = 128;
3035 margin_reg_value = 52;
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3038 deemph_reg_value = 128;
3039 margin_reg_value = 77;
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3042 deemph_reg_value = 128;
3043 margin_reg_value = 102;
3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3046 deemph_reg_value = 128;
3047 margin_reg_value = 154;
3048 /* FIXME extra to set for 1200 */
3054 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3055 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3057 deemph_reg_value = 85;
3058 margin_reg_value = 78;
3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3061 deemph_reg_value = 85;
3062 margin_reg_value = 116;
3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3065 deemph_reg_value = 85;
3066 margin_reg_value = 154;
3072 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3075 deemph_reg_value = 64;
3076 margin_reg_value = 104;
3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079 deemph_reg_value = 64;
3080 margin_reg_value = 154;
3086 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3087 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3089 deemph_reg_value = 43;
3090 margin_reg_value = 154;
3100 mutex_lock(&dev_priv->dpio_lock);
3102 /* Clear calc init */
3103 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3104 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3105 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3106 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3107 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3110 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3111 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3112 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3113 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3115 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3116 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3117 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3118 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3121 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3122 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3123 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3125 /* Program swing deemph */
3126 for (i = 0; i < 4; i++) {
3127 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3128 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3129 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3130 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3133 /* Program swing margin */
3134 for (i = 0; i < 4; i++) {
3135 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3136 val &= ~DPIO_SWING_MARGIN000_MASK;
3137 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3138 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3141 /* Disable unique transition scale */
3142 for (i = 0; i < 4; i++) {
3143 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3144 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3145 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3148 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3149 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3150 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3151 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3154 * The document said it needs to set bit 27 for ch0 and bit 26
3155 * for ch1. Might be a typo in the doc.
3156 * For now, for this unique transition scale selection, set bit
3157 * 27 for ch0 and ch1.
3159 for (i = 0; i < 4; i++) {
3160 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3161 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3162 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3165 for (i = 0; i < 4; i++) {
3166 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3167 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3168 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3169 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3173 /* Start swing calculation */
3174 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3175 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3176 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3178 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3179 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3180 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3183 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3184 val |= DPIO_LRC_BYPASS;
3185 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3187 mutex_unlock(&dev_priv->dpio_lock);
3193 intel_get_adjust_train(struct intel_dp *intel_dp,
3194 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3199 uint8_t voltage_max;
3200 uint8_t preemph_max;
3202 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3203 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3204 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3212 voltage_max = intel_dp_voltage_max(intel_dp);
3213 if (v >= voltage_max)
3214 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3216 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3217 if (p >= preemph_max)
3218 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3220 for (lane = 0; lane < 4; lane++)
3221 intel_dp->train_set[lane] = v | p;
3225 intel_gen4_signal_levels(uint8_t train_set)
3227 uint32_t signal_levels = 0;
3229 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3232 signal_levels |= DP_VOLTAGE_0_4;
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3235 signal_levels |= DP_VOLTAGE_0_6;
3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3238 signal_levels |= DP_VOLTAGE_0_8;
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3241 signal_levels |= DP_VOLTAGE_1_2;
3244 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3245 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3247 signal_levels |= DP_PRE_EMPHASIS_0;
3249 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3250 signal_levels |= DP_PRE_EMPHASIS_3_5;
3252 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3253 signal_levels |= DP_PRE_EMPHASIS_6;
3255 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3256 signal_levels |= DP_PRE_EMPHASIS_9_5;
3259 return signal_levels;
3262 /* Gen6's DP voltage swing and pre-emphasis control */
3264 intel_gen6_edp_signal_levels(uint8_t train_set)
3266 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3267 DP_TRAIN_PRE_EMPHASIS_MASK);
3268 switch (signal_levels) {
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3271 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3273 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3276 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3279 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3282 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3284 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3285 "0x%x\n", signal_levels);
3286 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3290 /* Gen7's DP voltage swing and pre-emphasis control */
3292 intel_gen7_edp_signal_levels(uint8_t train_set)
3294 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3295 DP_TRAIN_PRE_EMPHASIS_MASK);
3296 switch (signal_levels) {
3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3298 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3300 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3302 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3305 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3307 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3310 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3312 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3315 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3316 "0x%x\n", signal_levels);
3317 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3321 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3323 intel_hsw_signal_levels(uint8_t train_set)
3325 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3326 DP_TRAIN_PRE_EMPHASIS_MASK);
3327 switch (signal_levels) {
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3329 return DDI_BUF_TRANS_SELECT(0);
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3331 return DDI_BUF_TRANS_SELECT(1);
3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3333 return DDI_BUF_TRANS_SELECT(2);
3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3335 return DDI_BUF_TRANS_SELECT(3);
3337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3338 return DDI_BUF_TRANS_SELECT(4);
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3340 return DDI_BUF_TRANS_SELECT(5);
3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3342 return DDI_BUF_TRANS_SELECT(6);
3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3345 return DDI_BUF_TRANS_SELECT(7);
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3347 return DDI_BUF_TRANS_SELECT(8);
3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3350 return DDI_BUF_TRANS_SELECT(9);
3352 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3353 "0x%x\n", signal_levels);
3354 return DDI_BUF_TRANS_SELECT(0);
3358 /* Properly updates "DP" with the correct signal levels. */
3360 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3362 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3363 enum port port = intel_dig_port->port;
3364 struct drm_device *dev = intel_dig_port->base.base.dev;
3365 uint32_t signal_levels, mask;
3366 uint8_t train_set = intel_dp->train_set[0];
3368 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3369 signal_levels = intel_hsw_signal_levels(train_set);
3370 mask = DDI_BUF_EMP_MASK;
3371 } else if (IS_CHERRYVIEW(dev)) {
3372 signal_levels = intel_chv_signal_levels(intel_dp);
3374 } else if (IS_VALLEYVIEW(dev)) {
3375 signal_levels = intel_vlv_signal_levels(intel_dp);
3377 } else if (IS_GEN7(dev) && port == PORT_A) {
3378 signal_levels = intel_gen7_edp_signal_levels(train_set);
3379 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3380 } else if (IS_GEN6(dev) && port == PORT_A) {
3381 signal_levels = intel_gen6_edp_signal_levels(train_set);
3382 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3384 signal_levels = intel_gen4_signal_levels(train_set);
3385 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3388 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3390 *DP = (*DP & ~mask) | signal_levels;
3394 intel_dp_set_link_train(struct intel_dp *intel_dp,
3396 uint8_t dp_train_pat)
3398 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3399 struct drm_device *dev = intel_dig_port->base.base.dev;
3400 struct drm_i915_private *dev_priv = dev->dev_private;
3401 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3404 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3406 I915_WRITE(intel_dp->output_reg, *DP);
3407 POSTING_READ(intel_dp->output_reg);
3409 buf[0] = dp_train_pat;
3410 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3411 DP_TRAINING_PATTERN_DISABLE) {
3412 /* don't write DP_TRAINING_LANEx_SET on disable */
3415 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3416 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3417 len = intel_dp->lane_count + 1;
3420 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3427 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3428 uint8_t dp_train_pat)
3430 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3431 intel_dp_set_signal_levels(intel_dp, DP);
3432 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3436 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3437 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440 struct drm_device *dev = intel_dig_port->base.base.dev;
3441 struct drm_i915_private *dev_priv = dev->dev_private;
3444 intel_get_adjust_train(intel_dp, link_status);
3445 intel_dp_set_signal_levels(intel_dp, DP);
3447 I915_WRITE(intel_dp->output_reg, *DP);
3448 POSTING_READ(intel_dp->output_reg);
3450 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3451 intel_dp->train_set, intel_dp->lane_count);
3453 return ret == intel_dp->lane_count;
3456 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3458 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3459 struct drm_device *dev = intel_dig_port->base.base.dev;
3460 struct drm_i915_private *dev_priv = dev->dev_private;
3461 enum port port = intel_dig_port->port;
3467 val = I915_READ(DP_TP_CTL(port));
3468 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3469 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3470 I915_WRITE(DP_TP_CTL(port), val);
3473 * On PORT_A we can have only eDP in SST mode. There the only reason
3474 * we need to set idle transmission mode is to work around a HW issue
3475 * where we enable the pipe while not in idle link-training mode.
3476 * In this case there is requirement to wait for a minimum number of
3477 * idle patterns to be sent.
3482 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3484 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3487 /* Enable corresponding port and start training pattern 1 */
3489 intel_dp_start_link_train(struct intel_dp *intel_dp)
3491 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3492 struct drm_device *dev = encoder->dev;
3495 int voltage_tries, loop_tries;
3496 uint32_t DP = intel_dp->DP;
3497 uint8_t link_config[2];
3500 intel_ddi_prepare_link_retrain(encoder);
3502 /* Write the link configuration data */
3503 link_config[0] = intel_dp->link_bw;
3504 link_config[1] = intel_dp->lane_count;
3505 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3506 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3507 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3508 if (intel_dp->num_supported_rates)
3509 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3510 &intel_dp->rate_select, 1);
3513 link_config[1] = DP_SET_ANSI_8B10B;
3514 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3518 /* clock recovery */
3519 if (!intel_dp_reset_link_train(intel_dp, &DP,
3520 DP_TRAINING_PATTERN_1 |
3521 DP_LINK_SCRAMBLING_DISABLE)) {
3522 DRM_ERROR("failed to enable link training\n");
3530 uint8_t link_status[DP_LINK_STATUS_SIZE];
3532 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3533 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3534 DRM_ERROR("failed to get link status\n");
3538 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3539 DRM_DEBUG_KMS("clock recovery OK\n");
3543 /* Check to see if we've tried the max voltage */
3544 for (i = 0; i < intel_dp->lane_count; i++)
3545 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3547 if (i == intel_dp->lane_count) {
3549 if (loop_tries == 5) {
3550 DRM_ERROR("too many full retries, give up\n");
3553 intel_dp_reset_link_train(intel_dp, &DP,
3554 DP_TRAINING_PATTERN_1 |
3555 DP_LINK_SCRAMBLING_DISABLE);
3560 /* Check to see if we've tried the same voltage 5 times */
3561 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3563 if (voltage_tries == 5) {
3564 DRM_ERROR("too many voltage retries, give up\n");
3569 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3571 /* Update training set as requested by target */
3572 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3573 DRM_ERROR("failed to update link training\n");
3582 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3584 bool channel_eq = false;
3585 int tries, cr_tries;
3586 uint32_t DP = intel_dp->DP;
3587 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3589 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3590 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3591 training_pattern = DP_TRAINING_PATTERN_3;
3593 /* channel equalization */
3594 if (!intel_dp_set_link_train(intel_dp, &DP,
3596 DP_LINK_SCRAMBLING_DISABLE)) {
3597 DRM_ERROR("failed to start channel equalization\n");
3605 uint8_t link_status[DP_LINK_STATUS_SIZE];
3608 DRM_ERROR("failed to train DP, aborting\n");
3612 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3613 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3614 DRM_ERROR("failed to get link status\n");
3618 /* Make sure clock is still ok */
3619 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3620 intel_dp_start_link_train(intel_dp);
3621 intel_dp_set_link_train(intel_dp, &DP,
3623 DP_LINK_SCRAMBLING_DISABLE);
3628 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3633 /* Try 5 times, then try clock recovery if that fails */
3635 intel_dp_start_link_train(intel_dp);
3636 intel_dp_set_link_train(intel_dp, &DP,
3638 DP_LINK_SCRAMBLING_DISABLE);
3644 /* Update training set as requested by target */
3645 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3646 DRM_ERROR("failed to update link training\n");
3652 intel_dp_set_idle_link_train(intel_dp);
3657 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3661 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3663 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3664 DP_TRAINING_PATTERN_DISABLE);
3668 intel_dp_link_down(struct intel_dp *intel_dp)
3670 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3671 enum port port = intel_dig_port->port;
3672 struct drm_device *dev = intel_dig_port->base.base.dev;
3673 struct drm_i915_private *dev_priv = dev->dev_private;
3674 uint32_t DP = intel_dp->DP;
3676 if (WARN_ON(HAS_DDI(dev)))
3679 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3682 DRM_DEBUG_KMS("\n");
3684 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3685 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3686 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3688 if (IS_CHERRYVIEW(dev))
3689 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3691 DP &= ~DP_LINK_TRAIN_MASK;
3692 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3694 POSTING_READ(intel_dp->output_reg);
3696 if (HAS_PCH_IBX(dev) &&
3697 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3698 /* Hardware workaround: leaving our transcoder select
3699 * set to transcoder B while it's off will prevent the
3700 * corresponding HDMI output on transcoder A.
3702 * Combine this with another hardware workaround:
3703 * transcoder select bit can only be cleared while the
3706 DP &= ~DP_PIPEB_SELECT;
3707 I915_WRITE(intel_dp->output_reg, DP);
3708 POSTING_READ(intel_dp->output_reg);
3711 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3712 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3713 POSTING_READ(intel_dp->output_reg);
3714 msleep(intel_dp->panel_power_down_delay);
3718 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3720 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3721 struct drm_device *dev = dig_port->base.base.dev;
3722 struct drm_i915_private *dev_priv = dev->dev_private;
3725 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3726 sizeof(intel_dp->dpcd)) < 0)
3727 return false; /* aux transfer failed */
3729 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3731 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3732 return false; /* DPCD not present */
3734 /* Check if the panel supports PSR */
3735 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3736 if (is_edp(intel_dp)) {
3737 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3739 sizeof(intel_dp->psr_dpcd));
3740 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3741 dev_priv->psr.sink_support = true;
3742 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3746 /* Training Pattern 3 support, both source and sink */
3747 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3748 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3749 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3750 intel_dp->use_tps3 = true;
3751 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3753 intel_dp->use_tps3 = false;
3755 /* Intermediate frequency support */
3756 if (is_edp(intel_dp) &&
3757 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3758 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3759 (rev >= 0x03)) { /* eDp v1.4 or higher */
3760 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3763 intel_dp_dpcd_read_wake(&intel_dp->aux,
3764 DP_SUPPORTED_LINK_RATES,
3766 sizeof(supported_rates));
3768 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3769 int val = le16_to_cpu(supported_rates[i]);
3774 intel_dp->supported_rates[i] = val * 200;
3776 intel_dp->num_supported_rates = i;
3778 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3779 DP_DWN_STRM_PORT_PRESENT))
3780 return true; /* native DP sink */
3782 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3783 return true; /* no per-port downstream info */
3785 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3786 intel_dp->downstream_ports,
3787 DP_MAX_DOWNSTREAM_PORTS) < 0)
3788 return false; /* downstream port status fetch failed */
3794 intel_dp_probe_oui(struct intel_dp *intel_dp)
3798 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3801 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3802 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3803 buf[0], buf[1], buf[2]);
3805 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3806 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3807 buf[0], buf[1], buf[2]);
3811 intel_dp_probe_mst(struct intel_dp *intel_dp)
3815 if (!intel_dp->can_mst)
3818 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3821 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3822 if (buf[0] & DP_MST_CAP) {
3823 DRM_DEBUG_KMS("Sink is MST capable\n");
3824 intel_dp->is_mst = true;
3826 DRM_DEBUG_KMS("Sink is not MST capable\n");
3827 intel_dp->is_mst = false;
3831 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3832 return intel_dp->is_mst;
3835 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3837 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3838 struct drm_device *dev = intel_dig_port->base.base.dev;
3839 struct intel_crtc *intel_crtc =
3840 to_intel_crtc(intel_dig_port->base.base.crtc);
3845 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3848 if (!(buf & DP_TEST_CRC_SUPPORTED))
3851 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3854 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3855 buf | DP_TEST_SINK_START) < 0)
3858 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3860 test_crc_count = buf & DP_TEST_COUNT_MASK;
3863 if (drm_dp_dpcd_readb(&intel_dp->aux,
3864 DP_TEST_SINK_MISC, &buf) < 0)
3866 intel_wait_for_vblank(dev, intel_crtc->pipe);
3867 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3869 if (attempts == 0) {
3870 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3874 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3877 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3879 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3880 buf & ~DP_TEST_SINK_START) < 0)
3887 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3889 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3890 DP_DEVICE_SERVICE_IRQ_VECTOR,
3891 sink_irq_vector, 1) == 1;
3895 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3899 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3901 sink_irq_vector, 14);
3909 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3911 /* NAK by default */
3912 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3916 intel_dp_check_mst_status(struct intel_dp *intel_dp)
3920 if (intel_dp->is_mst) {
3925 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3929 /* check link status - esi[10] = 0x200c */
3930 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3931 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3932 intel_dp_start_link_train(intel_dp);
3933 intel_dp_complete_link_train(intel_dp);
3934 intel_dp_stop_link_train(intel_dp);
3937 DRM_DEBUG_KMS("got esi %3ph\n", esi);
3938 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3941 for (retry = 0; retry < 3; retry++) {
3943 wret = drm_dp_dpcd_write(&intel_dp->aux,
3944 DP_SINK_COUNT_ESI+1,
3951 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3953 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
3961 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3962 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3963 intel_dp->is_mst = false;
3964 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3965 /* send a hotplug event */
3966 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3973 * According to DP spec
3976 * 2. Configure link according to Receiver Capabilities
3977 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3978 * 4. Check link status on receipt of hot-plug interrupt
3981 intel_dp_check_link_status(struct intel_dp *intel_dp)
3983 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3984 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3986 u8 link_status[DP_LINK_STATUS_SIZE];
3988 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3990 if (!intel_encoder->connectors_active)
3993 if (WARN_ON(!intel_encoder->base.crtc))
3996 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3999 /* Try to read receiver status if the link appears to be up */
4000 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4004 /* Now read the DPCD to see if it's actually running */
4005 if (!intel_dp_get_dpcd(intel_dp)) {
4009 /* Try to read the source of the interrupt */
4010 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4011 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4012 /* Clear interrupt source */
4013 drm_dp_dpcd_writeb(&intel_dp->aux,
4014 DP_DEVICE_SERVICE_IRQ_VECTOR,
4017 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4018 intel_dp_handle_test_request(intel_dp);
4019 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4020 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4023 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4024 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4025 intel_encoder->base.name);
4026 intel_dp_start_link_train(intel_dp);
4027 intel_dp_complete_link_train(intel_dp);
4028 intel_dp_stop_link_train(intel_dp);
4032 /* XXX this is probably wrong for multiple downstream ports */
4033 static enum drm_connector_status
4034 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4036 uint8_t *dpcd = intel_dp->dpcd;
4039 if (!intel_dp_get_dpcd(intel_dp))
4040 return connector_status_disconnected;
4042 /* if there's no downstream port, we're done */
4043 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4044 return connector_status_connected;
4046 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4047 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4048 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4051 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4053 return connector_status_unknown;
4055 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4056 : connector_status_disconnected;
4059 /* If no HPD, poke DDC gently */
4060 if (drm_probe_ddc(&intel_dp->aux.ddc))
4061 return connector_status_connected;
4063 /* Well we tried, say unknown for unreliable port types */
4064 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4065 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4066 if (type == DP_DS_PORT_TYPE_VGA ||
4067 type == DP_DS_PORT_TYPE_NON_EDID)
4068 return connector_status_unknown;
4070 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4071 DP_DWN_STRM_PORT_TYPE_MASK;
4072 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4073 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4074 return connector_status_unknown;
4077 /* Anything else is out of spec, warn and ignore */
4078 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4079 return connector_status_disconnected;
4082 static enum drm_connector_status
4083 edp_detect(struct intel_dp *intel_dp)
4085 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4086 enum drm_connector_status status;
4088 status = intel_panel_detect(dev);
4089 if (status == connector_status_unknown)
4090 status = connector_status_connected;
4095 static enum drm_connector_status
4096 ironlake_dp_detect(struct intel_dp *intel_dp)
4098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4099 struct drm_i915_private *dev_priv = dev->dev_private;
4100 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4102 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4103 return connector_status_disconnected;
4105 return intel_dp_detect_dpcd(intel_dp);
4108 static int g4x_digital_port_connected(struct drm_device *dev,
4109 struct intel_digital_port *intel_dig_port)
4111 struct drm_i915_private *dev_priv = dev->dev_private;
4114 if (IS_VALLEYVIEW(dev)) {
4115 switch (intel_dig_port->port) {
4117 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4120 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4123 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4129 switch (intel_dig_port->port) {
4131 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4134 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4137 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4144 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4149 static enum drm_connector_status
4150 g4x_dp_detect(struct intel_dp *intel_dp)
4152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4153 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4156 /* Can't disconnect eDP, but you can close the lid... */
4157 if (is_edp(intel_dp)) {
4158 enum drm_connector_status status;
4160 status = intel_panel_detect(dev);
4161 if (status == connector_status_unknown)
4162 status = connector_status_connected;
4166 ret = g4x_digital_port_connected(dev, intel_dig_port);
4168 return connector_status_unknown;
4170 return connector_status_disconnected;
4172 return intel_dp_detect_dpcd(intel_dp);
4175 static struct edid *
4176 intel_dp_get_edid(struct intel_dp *intel_dp)
4178 struct intel_connector *intel_connector = intel_dp->attached_connector;
4180 /* use cached edid if we have one */
4181 if (intel_connector->edid) {
4183 if (IS_ERR(intel_connector->edid))
4186 return drm_edid_duplicate(intel_connector->edid);
4188 return drm_get_edid(&intel_connector->base,
4189 &intel_dp->aux.ddc);
4193 intel_dp_set_edid(struct intel_dp *intel_dp)
4195 struct intel_connector *intel_connector = intel_dp->attached_connector;
4198 edid = intel_dp_get_edid(intel_dp);
4199 intel_connector->detect_edid = edid;
4201 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4202 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4204 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4208 intel_dp_unset_edid(struct intel_dp *intel_dp)
4210 struct intel_connector *intel_connector = intel_dp->attached_connector;
4212 kfree(intel_connector->detect_edid);
4213 intel_connector->detect_edid = NULL;
4215 intel_dp->has_audio = false;
4218 static enum intel_display_power_domain
4219 intel_dp_power_get(struct intel_dp *dp)
4221 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4222 enum intel_display_power_domain power_domain;
4224 power_domain = intel_display_port_power_domain(encoder);
4225 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4227 return power_domain;
4231 intel_dp_power_put(struct intel_dp *dp,
4232 enum intel_display_power_domain power_domain)
4234 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4235 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4238 static enum drm_connector_status
4239 intel_dp_detect(struct drm_connector *connector, bool force)
4241 struct intel_dp *intel_dp = intel_attached_dp(connector);
4242 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4243 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4244 struct drm_device *dev = connector->dev;
4245 enum drm_connector_status status;
4246 enum intel_display_power_domain power_domain;
4249 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4250 connector->base.id, connector->name);
4251 intel_dp_unset_edid(intel_dp);
4253 if (intel_dp->is_mst) {
4254 /* MST devices are disconnected from a monitor POV */
4255 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4256 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4257 return connector_status_disconnected;
4260 power_domain = intel_dp_power_get(intel_dp);
4262 /* Can't disconnect eDP, but you can close the lid... */
4263 if (is_edp(intel_dp))
4264 status = edp_detect(intel_dp);
4265 else if (HAS_PCH_SPLIT(dev))
4266 status = ironlake_dp_detect(intel_dp);
4268 status = g4x_dp_detect(intel_dp);
4269 if (status != connector_status_connected)
4272 intel_dp_probe_oui(intel_dp);
4274 ret = intel_dp_probe_mst(intel_dp);
4276 /* if we are in MST mode then this connector
4277 won't appear connected or have anything with EDID on it */
4278 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4279 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4280 status = connector_status_disconnected;
4284 intel_dp_set_edid(intel_dp);
4286 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4287 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4288 status = connector_status_connected;
4291 intel_dp_power_put(intel_dp, power_domain);
4296 intel_dp_force(struct drm_connector *connector)
4298 struct intel_dp *intel_dp = intel_attached_dp(connector);
4299 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4300 enum intel_display_power_domain power_domain;
4302 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4303 connector->base.id, connector->name);
4304 intel_dp_unset_edid(intel_dp);
4306 if (connector->status != connector_status_connected)
4309 power_domain = intel_dp_power_get(intel_dp);
4311 intel_dp_set_edid(intel_dp);
4313 intel_dp_power_put(intel_dp, power_domain);
4315 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4316 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4319 static int intel_dp_get_modes(struct drm_connector *connector)
4321 struct intel_connector *intel_connector = to_intel_connector(connector);
4324 edid = intel_connector->detect_edid;
4326 int ret = intel_connector_update_modes(connector, edid);
4331 /* if eDP has no EDID, fall back to fixed mode */
4332 if (is_edp(intel_attached_dp(connector)) &&
4333 intel_connector->panel.fixed_mode) {
4334 struct drm_display_mode *mode;
4336 mode = drm_mode_duplicate(connector->dev,
4337 intel_connector->panel.fixed_mode);
4339 drm_mode_probed_add(connector, mode);
4348 intel_dp_detect_audio(struct drm_connector *connector)
4350 bool has_audio = false;
4353 edid = to_intel_connector(connector)->detect_edid;
4355 has_audio = drm_detect_monitor_audio(edid);
4361 intel_dp_set_property(struct drm_connector *connector,
4362 struct drm_property *property,
4365 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4366 struct intel_connector *intel_connector = to_intel_connector(connector);
4367 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4368 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4371 ret = drm_object_property_set_value(&connector->base, property, val);
4375 if (property == dev_priv->force_audio_property) {
4379 if (i == intel_dp->force_audio)
4382 intel_dp->force_audio = i;
4384 if (i == HDMI_AUDIO_AUTO)
4385 has_audio = intel_dp_detect_audio(connector);
4387 has_audio = (i == HDMI_AUDIO_ON);
4389 if (has_audio == intel_dp->has_audio)
4392 intel_dp->has_audio = has_audio;
4396 if (property == dev_priv->broadcast_rgb_property) {
4397 bool old_auto = intel_dp->color_range_auto;
4398 uint32_t old_range = intel_dp->color_range;
4401 case INTEL_BROADCAST_RGB_AUTO:
4402 intel_dp->color_range_auto = true;
4404 case INTEL_BROADCAST_RGB_FULL:
4405 intel_dp->color_range_auto = false;
4406 intel_dp->color_range = 0;
4408 case INTEL_BROADCAST_RGB_LIMITED:
4409 intel_dp->color_range_auto = false;
4410 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4416 if (old_auto == intel_dp->color_range_auto &&
4417 old_range == intel_dp->color_range)
4423 if (is_edp(intel_dp) &&
4424 property == connector->dev->mode_config.scaling_mode_property) {
4425 if (val == DRM_MODE_SCALE_NONE) {
4426 DRM_DEBUG_KMS("no scaling not supported\n");
4430 if (intel_connector->panel.fitting_mode == val) {
4431 /* the eDP scaling property is not changed */
4434 intel_connector->panel.fitting_mode = val;
4442 if (intel_encoder->base.crtc)
4443 intel_crtc_restore_mode(intel_encoder->base.crtc);
4449 intel_dp_connector_destroy(struct drm_connector *connector)
4451 struct intel_connector *intel_connector = to_intel_connector(connector);
4453 kfree(intel_connector->detect_edid);
4455 if (!IS_ERR_OR_NULL(intel_connector->edid))
4456 kfree(intel_connector->edid);
4458 /* Can't call is_edp() since the encoder may have been destroyed
4460 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4461 intel_panel_fini(&intel_connector->panel);
4463 drm_connector_cleanup(connector);
4467 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4469 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4470 struct intel_dp *intel_dp = &intel_dig_port->dp;
4472 drm_dp_aux_unregister(&intel_dp->aux);
4473 intel_dp_mst_encoder_cleanup(intel_dig_port);
4474 if (is_edp(intel_dp)) {
4475 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4477 * vdd might still be enabled do to the delayed vdd off.
4478 * Make sure vdd is actually turned off here.
4481 edp_panel_vdd_off_sync(intel_dp);
4482 pps_unlock(intel_dp);
4484 if (intel_dp->edp_notifier.notifier_call) {
4485 unregister_reboot_notifier(&intel_dp->edp_notifier);
4486 intel_dp->edp_notifier.notifier_call = NULL;
4489 drm_encoder_cleanup(encoder);
4490 kfree(intel_dig_port);
4493 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4495 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4497 if (!is_edp(intel_dp))
4501 * vdd might still be enabled do to the delayed vdd off.
4502 * Make sure vdd is actually turned off here.
4504 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4506 edp_panel_vdd_off_sync(intel_dp);
4507 pps_unlock(intel_dp);
4510 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4512 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4513 struct drm_device *dev = intel_dig_port->base.base.dev;
4514 struct drm_i915_private *dev_priv = dev->dev_private;
4515 enum intel_display_power_domain power_domain;
4517 lockdep_assert_held(&dev_priv->pps_mutex);
4519 if (!edp_have_panel_vdd(intel_dp))
4523 * The VDD bit needs a power domain reference, so if the bit is
4524 * already enabled when we boot or resume, grab this reference and
4525 * schedule a vdd off, so we don't hold on to the reference
4528 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4529 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4530 intel_display_power_get(dev_priv, power_domain);
4532 edp_panel_vdd_schedule_off(intel_dp);
4535 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4537 struct intel_dp *intel_dp;
4539 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4542 intel_dp = enc_to_intel_dp(encoder);
4547 * Read out the current power sequencer assignment,
4548 * in case the BIOS did something with it.
4550 if (IS_VALLEYVIEW(encoder->dev))
4551 vlv_initial_power_sequencer_setup(intel_dp);
4553 intel_edp_panel_vdd_sanitize(intel_dp);
4555 pps_unlock(intel_dp);
4558 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4559 .dpms = intel_connector_dpms,
4560 .detect = intel_dp_detect,
4561 .force = intel_dp_force,
4562 .fill_modes = drm_helper_probe_single_connector_modes,
4563 .set_property = intel_dp_set_property,
4564 .atomic_get_property = intel_connector_atomic_get_property,
4565 .destroy = intel_dp_connector_destroy,
4566 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4569 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4570 .get_modes = intel_dp_get_modes,
4571 .mode_valid = intel_dp_mode_valid,
4572 .best_encoder = intel_best_encoder,
4575 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4576 .reset = intel_dp_encoder_reset,
4577 .destroy = intel_dp_encoder_destroy,
4581 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4587 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4589 struct intel_dp *intel_dp = &intel_dig_port->dp;
4590 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4591 struct drm_device *dev = intel_dig_port->base.base.dev;
4592 struct drm_i915_private *dev_priv = dev->dev_private;
4593 enum intel_display_power_domain power_domain;
4594 enum irqreturn ret = IRQ_NONE;
4596 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4597 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4599 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4601 * vdd off can generate a long pulse on eDP which
4602 * would require vdd on to handle it, and thus we
4603 * would end up in an endless cycle of
4604 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4606 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4607 port_name(intel_dig_port->port));
4611 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4612 port_name(intel_dig_port->port),
4613 long_hpd ? "long" : "short");
4615 power_domain = intel_display_port_power_domain(intel_encoder);
4616 intel_display_power_get(dev_priv, power_domain);
4620 if (HAS_PCH_SPLIT(dev)) {
4621 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4624 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4628 if (!intel_dp_get_dpcd(intel_dp)) {
4632 intel_dp_probe_oui(intel_dp);
4634 if (!intel_dp_probe_mst(intel_dp))
4638 if (intel_dp->is_mst) {
4639 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4643 if (!intel_dp->is_mst) {
4645 * we'll check the link status via the normal hot plug path later -
4646 * but for short hpds we should check it now
4648 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4649 intel_dp_check_link_status(intel_dp);
4650 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4658 /* if we were in MST mode, and device is not there get out of MST mode */
4659 if (intel_dp->is_mst) {
4660 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4661 intel_dp->is_mst = false;
4662 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4665 intel_display_power_put(dev_priv, power_domain);
4670 /* Return which DP Port should be selected for Transcoder DP control */
4672 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4674 struct drm_device *dev = crtc->dev;
4675 struct intel_encoder *intel_encoder;
4676 struct intel_dp *intel_dp;
4678 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4679 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4681 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4682 intel_encoder->type == INTEL_OUTPUT_EDP)
4683 return intel_dp->output_reg;
4689 /* check the VBT to see whether the eDP is on DP-D port */
4690 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4692 struct drm_i915_private *dev_priv = dev->dev_private;
4693 union child_device_config *p_child;
4695 static const short port_mapping[] = {
4696 [PORT_B] = PORT_IDPB,
4697 [PORT_C] = PORT_IDPC,
4698 [PORT_D] = PORT_IDPD,
4704 if (!dev_priv->vbt.child_dev_num)
4707 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4708 p_child = dev_priv->vbt.child_dev + i;
4710 if (p_child->common.dvo_port == port_mapping[port] &&
4711 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4712 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4719 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4721 struct intel_connector *intel_connector = to_intel_connector(connector);
4723 intel_attach_force_audio_property(connector);
4724 intel_attach_broadcast_rgb_property(connector);
4725 intel_dp->color_range_auto = true;
4727 if (is_edp(intel_dp)) {
4728 drm_mode_create_scaling_mode_property(connector->dev);
4729 drm_object_attach_property(
4731 connector->dev->mode_config.scaling_mode_property,
4732 DRM_MODE_SCALE_ASPECT);
4733 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4737 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4739 intel_dp->last_power_cycle = jiffies;
4740 intel_dp->last_power_on = jiffies;
4741 intel_dp->last_backlight_off = jiffies;
4745 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4746 struct intel_dp *intel_dp)
4748 struct drm_i915_private *dev_priv = dev->dev_private;
4749 struct edp_power_seq cur, vbt, spec,
4750 *final = &intel_dp->pps_delays;
4751 u32 pp_on, pp_off, pp_div, pp;
4752 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4754 lockdep_assert_held(&dev_priv->pps_mutex);
4756 /* already initialized? */
4757 if (final->t11_t12 != 0)
4760 if (HAS_PCH_SPLIT(dev)) {
4761 pp_ctrl_reg = PCH_PP_CONTROL;
4762 pp_on_reg = PCH_PP_ON_DELAYS;
4763 pp_off_reg = PCH_PP_OFF_DELAYS;
4764 pp_div_reg = PCH_PP_DIVISOR;
4766 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4768 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4769 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4770 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4771 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4774 /* Workaround: Need to write PP_CONTROL with the unlock key as
4775 * the very first thing. */
4776 pp = ironlake_get_pp_control(intel_dp);
4777 I915_WRITE(pp_ctrl_reg, pp);
4779 pp_on = I915_READ(pp_on_reg);
4780 pp_off = I915_READ(pp_off_reg);
4781 pp_div = I915_READ(pp_div_reg);
4783 /* Pull timing values out of registers */
4784 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4785 PANEL_POWER_UP_DELAY_SHIFT;
4787 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4788 PANEL_LIGHT_ON_DELAY_SHIFT;
4790 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4791 PANEL_LIGHT_OFF_DELAY_SHIFT;
4793 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4794 PANEL_POWER_DOWN_DELAY_SHIFT;
4796 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4797 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4799 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4800 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4802 vbt = dev_priv->vbt.edp_pps;
4804 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4805 * our hw here, which are all in 100usec. */
4806 spec.t1_t3 = 210 * 10;
4807 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4808 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4809 spec.t10 = 500 * 10;
4810 /* This one is special and actually in units of 100ms, but zero
4811 * based in the hw (so we need to add 100 ms). But the sw vbt
4812 * table multiplies it with 1000 to make it in units of 100usec,
4814 spec.t11_t12 = (510 + 100) * 10;
4816 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4817 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4819 /* Use the max of the register settings and vbt. If both are
4820 * unset, fall back to the spec limits. */
4821 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4823 max(cur.field, vbt.field))
4824 assign_final(t1_t3);
4828 assign_final(t11_t12);
4831 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4832 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4833 intel_dp->backlight_on_delay = get_delay(t8);
4834 intel_dp->backlight_off_delay = get_delay(t9);
4835 intel_dp->panel_power_down_delay = get_delay(t10);
4836 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4839 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4840 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4841 intel_dp->panel_power_cycle_delay);
4843 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4844 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4848 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4849 struct intel_dp *intel_dp)
4851 struct drm_i915_private *dev_priv = dev->dev_private;
4852 u32 pp_on, pp_off, pp_div, port_sel = 0;
4853 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4854 int pp_on_reg, pp_off_reg, pp_div_reg;
4855 enum port port = dp_to_dig_port(intel_dp)->port;
4856 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4858 lockdep_assert_held(&dev_priv->pps_mutex);
4860 if (HAS_PCH_SPLIT(dev)) {
4861 pp_on_reg = PCH_PP_ON_DELAYS;
4862 pp_off_reg = PCH_PP_OFF_DELAYS;
4863 pp_div_reg = PCH_PP_DIVISOR;
4865 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4867 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4868 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4869 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4873 * And finally store the new values in the power sequencer. The
4874 * backlight delays are set to 1 because we do manual waits on them. For
4875 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4876 * we'll end up waiting for the backlight off delay twice: once when we
4877 * do the manual sleep, and once when we disable the panel and wait for
4878 * the PP_STATUS bit to become zero.
4880 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4881 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4882 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4883 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4884 /* Compute the divisor for the pp clock, simply match the Bspec
4886 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4887 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4888 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4890 /* Haswell doesn't have any port selection bits for the panel
4891 * power sequencer any more. */
4892 if (IS_VALLEYVIEW(dev)) {
4893 port_sel = PANEL_PORT_SELECT_VLV(port);
4894 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4896 port_sel = PANEL_PORT_SELECT_DPA;
4898 port_sel = PANEL_PORT_SELECT_DPD;
4903 I915_WRITE(pp_on_reg, pp_on);
4904 I915_WRITE(pp_off_reg, pp_off);
4905 I915_WRITE(pp_div_reg, pp_div);
4907 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4908 I915_READ(pp_on_reg),
4909 I915_READ(pp_off_reg),
4910 I915_READ(pp_div_reg));
4914 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4916 * @refresh_rate: RR to be programmed
4918 * This function gets called when refresh rate (RR) has to be changed from
4919 * one frequency to another. Switches can be between high and low RR
4920 * supported by the panel or to any other RR based on media playback (in
4921 * this case, RR value needs to be passed from user space).
4923 * The caller of this function needs to take a lock on dev_priv->drrs.
4925 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4927 struct drm_i915_private *dev_priv = dev->dev_private;
4928 struct intel_encoder *encoder;
4929 struct intel_digital_port *dig_port = NULL;
4930 struct intel_dp *intel_dp = dev_priv->drrs.dp;
4931 struct intel_crtc_state *config = NULL;
4932 struct intel_crtc *intel_crtc = NULL;
4934 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4936 if (refresh_rate <= 0) {
4937 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4941 if (intel_dp == NULL) {
4942 DRM_DEBUG_KMS("DRRS not supported.\n");
4947 * FIXME: This needs proper synchronization with psr state for some
4948 * platforms that cannot have PSR and DRRS enabled at the same time.
4951 dig_port = dp_to_dig_port(intel_dp);
4952 encoder = &dig_port->base;
4953 intel_crtc = encoder->new_crtc;
4956 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4960 config = intel_crtc->config;
4962 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
4963 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4967 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4969 index = DRRS_LOW_RR;
4971 if (index == dev_priv->drrs.refresh_rate_type) {
4973 "DRRS requested for previously set RR...ignoring\n");
4977 if (!intel_crtc->active) {
4978 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4982 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4985 intel_dp_set_m_n(intel_crtc, M1_N1);
4988 intel_dp_set_m_n(intel_crtc, M2_N2);
4992 DRM_ERROR("Unsupported refreshrate type\n");
4994 } else if (INTEL_INFO(dev)->gen > 6) {
4995 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4996 val = I915_READ(reg);
4998 if (index > DRRS_HIGH_RR) {
4999 if (IS_VALLEYVIEW(dev))
5000 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5002 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5004 if (IS_VALLEYVIEW(dev))
5005 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5007 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5009 I915_WRITE(reg, val);
5012 dev_priv->drrs.refresh_rate_type = index;
5014 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5018 * intel_edp_drrs_enable - init drrs struct if supported
5019 * @intel_dp: DP struct
5021 * Initializes frontbuffer_bits and drrs.dp
5023 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5026 struct drm_i915_private *dev_priv = dev->dev_private;
5027 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5028 struct drm_crtc *crtc = dig_port->base.base.crtc;
5029 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5031 if (!intel_crtc->config->has_drrs) {
5032 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5036 mutex_lock(&dev_priv->drrs.mutex);
5037 if (WARN_ON(dev_priv->drrs.dp)) {
5038 DRM_ERROR("DRRS already enabled\n");
5042 dev_priv->drrs.busy_frontbuffer_bits = 0;
5044 dev_priv->drrs.dp = intel_dp;
5047 mutex_unlock(&dev_priv->drrs.mutex);
5051 * intel_edp_drrs_disable - Disable DRRS
5052 * @intel_dp: DP struct
5055 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5057 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5058 struct drm_i915_private *dev_priv = dev->dev_private;
5059 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5060 struct drm_crtc *crtc = dig_port->base.base.crtc;
5061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5063 if (!intel_crtc->config->has_drrs)
5066 mutex_lock(&dev_priv->drrs.mutex);
5067 if (!dev_priv->drrs.dp) {
5068 mutex_unlock(&dev_priv->drrs.mutex);
5072 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5073 intel_dp_set_drrs_state(dev_priv->dev,
5074 intel_dp->attached_connector->panel.
5075 fixed_mode->vrefresh);
5077 dev_priv->drrs.dp = NULL;
5078 mutex_unlock(&dev_priv->drrs.mutex);
5080 cancel_delayed_work_sync(&dev_priv->drrs.work);
5083 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5085 struct drm_i915_private *dev_priv =
5086 container_of(work, typeof(*dev_priv), drrs.work.work);
5087 struct intel_dp *intel_dp;
5089 mutex_lock(&dev_priv->drrs.mutex);
5091 intel_dp = dev_priv->drrs.dp;
5097 * The delayed work can race with an invalidate hence we need to
5101 if (dev_priv->drrs.busy_frontbuffer_bits)
5104 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5105 intel_dp_set_drrs_state(dev_priv->dev,
5106 intel_dp->attached_connector->panel.
5107 downclock_mode->vrefresh);
5111 mutex_unlock(&dev_priv->drrs.mutex);
5115 * intel_edp_drrs_invalidate - Invalidate DRRS
5117 * @frontbuffer_bits: frontbuffer plane tracking bits
5119 * When there is a disturbance on screen (due to cursor movement/time
5120 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5123 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5125 void intel_edp_drrs_invalidate(struct drm_device *dev,
5126 unsigned frontbuffer_bits)
5128 struct drm_i915_private *dev_priv = dev->dev_private;
5129 struct drm_crtc *crtc;
5132 if (!dev_priv->drrs.dp)
5135 cancel_delayed_work_sync(&dev_priv->drrs.work);
5137 mutex_lock(&dev_priv->drrs.mutex);
5138 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5139 pipe = to_intel_crtc(crtc)->pipe;
5141 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5142 intel_dp_set_drrs_state(dev_priv->dev,
5143 dev_priv->drrs.dp->attached_connector->panel.
5144 fixed_mode->vrefresh);
5147 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5149 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5150 mutex_unlock(&dev_priv->drrs.mutex);
5154 * intel_edp_drrs_flush - Flush DRRS
5156 * @frontbuffer_bits: frontbuffer plane tracking bits
5158 * When there is no movement on screen, DRRS work can be scheduled.
5159 * This DRRS work is responsible for setting relevant registers after a
5160 * timeout of 1 second.
5162 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5164 void intel_edp_drrs_flush(struct drm_device *dev,
5165 unsigned frontbuffer_bits)
5167 struct drm_i915_private *dev_priv = dev->dev_private;
5168 struct drm_crtc *crtc;
5171 if (!dev_priv->drrs.dp)
5174 cancel_delayed_work_sync(&dev_priv->drrs.work);
5176 mutex_lock(&dev_priv->drrs.mutex);
5177 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5178 pipe = to_intel_crtc(crtc)->pipe;
5179 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5181 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5182 !dev_priv->drrs.busy_frontbuffer_bits)
5183 schedule_delayed_work(&dev_priv->drrs.work,
5184 msecs_to_jiffies(1000));
5185 mutex_unlock(&dev_priv->drrs.mutex);
5189 * DOC: Display Refresh Rate Switching (DRRS)
5191 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5192 * which enables swtching between low and high refresh rates,
5193 * dynamically, based on the usage scenario. This feature is applicable
5194 * for internal panels.
5196 * Indication that the panel supports DRRS is given by the panel EDID, which
5197 * would list multiple refresh rates for one resolution.
5199 * DRRS is of 2 types - static and seamless.
5200 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5201 * (may appear as a blink on screen) and is used in dock-undock scenario.
5202 * Seamless DRRS involves changing RR without any visual effect to the user
5203 * and can be used during normal system usage. This is done by programming
5204 * certain registers.
5206 * Support for static/seamless DRRS may be indicated in the VBT based on
5207 * inputs from the panel spec.
5209 * DRRS saves power by switching to low RR based on usage scenarios.
5212 * The implementation is based on frontbuffer tracking implementation.
5213 * When there is a disturbance on the screen triggered by user activity or a
5214 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5215 * When there is no movement on screen, after a timeout of 1 second, a switch
5216 * to low RR is made.
5217 * For integration with frontbuffer tracking code,
5218 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5220 * DRRS can be further extended to support other internal panels and also
5221 * the scenario of video playback wherein RR is set based on the rate
5222 * requested by userspace.
5226 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5227 * @intel_connector: eDP connector
5228 * @fixed_mode: preferred mode of panel
5230 * This function is called only once at driver load to initialize basic
5234 * Downclock mode if panel supports it, else return NULL.
5235 * DRRS support is determined by the presence of downclock mode (apart
5236 * from VBT setting).
5238 static struct drm_display_mode *
5239 intel_dp_drrs_init(struct intel_connector *intel_connector,
5240 struct drm_display_mode *fixed_mode)
5242 struct drm_connector *connector = &intel_connector->base;
5243 struct drm_device *dev = connector->dev;
5244 struct drm_i915_private *dev_priv = dev->dev_private;
5245 struct drm_display_mode *downclock_mode = NULL;
5247 if (INTEL_INFO(dev)->gen <= 6) {
5248 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5252 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5253 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5257 downclock_mode = intel_find_panel_downclock
5258 (dev, fixed_mode, connector);
5260 if (!downclock_mode) {
5261 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5265 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5267 mutex_init(&dev_priv->drrs.mutex);
5269 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5271 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5272 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5273 return downclock_mode;
5276 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5277 struct intel_connector *intel_connector)
5279 struct drm_connector *connector = &intel_connector->base;
5280 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5281 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5282 struct drm_device *dev = intel_encoder->base.dev;
5283 struct drm_i915_private *dev_priv = dev->dev_private;
5284 struct drm_display_mode *fixed_mode = NULL;
5285 struct drm_display_mode *downclock_mode = NULL;
5287 struct drm_display_mode *scan;
5289 enum pipe pipe = INVALID_PIPE;
5291 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
5293 if (!is_edp(intel_dp))
5297 intel_edp_panel_vdd_sanitize(intel_dp);
5298 pps_unlock(intel_dp);
5300 /* Cache DPCD and EDID for edp. */
5301 has_dpcd = intel_dp_get_dpcd(intel_dp);
5304 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5305 dev_priv->no_aux_handshake =
5306 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5307 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5309 /* if this fails, presume the device is a ghost */
5310 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5314 /* We now know it's not a ghost, init power sequence regs. */
5316 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5317 pps_unlock(intel_dp);
5319 mutex_lock(&dev->mode_config.mutex);
5320 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5322 if (drm_add_edid_modes(connector, edid)) {
5323 drm_mode_connector_update_edid_property(connector,
5325 drm_edid_to_eld(connector, edid);
5328 edid = ERR_PTR(-EINVAL);
5331 edid = ERR_PTR(-ENOENT);
5333 intel_connector->edid = edid;
5335 /* prefer fixed mode from EDID if available */
5336 list_for_each_entry(scan, &connector->probed_modes, head) {
5337 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5338 fixed_mode = drm_mode_duplicate(dev, scan);
5339 downclock_mode = intel_dp_drrs_init(
5340 intel_connector, fixed_mode);
5345 /* fallback to VBT if available for eDP */
5346 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5347 fixed_mode = drm_mode_duplicate(dev,
5348 dev_priv->vbt.lfp_lvds_vbt_mode);
5350 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5352 mutex_unlock(&dev->mode_config.mutex);
5354 if (IS_VALLEYVIEW(dev)) {
5355 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5356 register_reboot_notifier(&intel_dp->edp_notifier);
5359 * Figure out the current pipe for the initial backlight setup.
5360 * If the current pipe isn't valid, try the PPS pipe, and if that
5361 * fails just assume pipe A.
5363 if (IS_CHERRYVIEW(dev))
5364 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5366 pipe = PORT_TO_PIPE(intel_dp->DP);
5368 if (pipe != PIPE_A && pipe != PIPE_B)
5369 pipe = intel_dp->pps_pipe;
5371 if (pipe != PIPE_A && pipe != PIPE_B)
5374 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5378 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5379 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5380 intel_panel_setup_backlight(connector, pipe);
5386 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5387 struct intel_connector *intel_connector)
5389 struct drm_connector *connector = &intel_connector->base;
5390 struct intel_dp *intel_dp = &intel_dig_port->dp;
5391 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5392 struct drm_device *dev = intel_encoder->base.dev;
5393 struct drm_i915_private *dev_priv = dev->dev_private;
5394 enum port port = intel_dig_port->port;
5397 intel_dp->pps_pipe = INVALID_PIPE;
5399 /* intel_dp vfuncs */
5400 if (INTEL_INFO(dev)->gen >= 9)
5401 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5402 else if (IS_VALLEYVIEW(dev))
5403 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5404 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5405 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5406 else if (HAS_PCH_SPLIT(dev))
5407 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5409 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5411 if (INTEL_INFO(dev)->gen >= 9)
5412 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5414 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5416 /* Preserve the current hw state. */
5417 intel_dp->DP = I915_READ(intel_dp->output_reg);
5418 intel_dp->attached_connector = intel_connector;
5420 if (intel_dp_is_edp(dev, port))
5421 type = DRM_MODE_CONNECTOR_eDP;
5423 type = DRM_MODE_CONNECTOR_DisplayPort;
5426 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5427 * for DP the encoder type can be set by the caller to
5428 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5430 if (type == DRM_MODE_CONNECTOR_eDP)
5431 intel_encoder->type = INTEL_OUTPUT_EDP;
5433 /* eDP only on port B and/or C on vlv/chv */
5434 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5435 port != PORT_B && port != PORT_C))
5438 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5439 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5442 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5443 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5445 connector->interlace_allowed = true;
5446 connector->doublescan_allowed = 0;
5448 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5449 edp_panel_vdd_work);
5451 intel_connector_attach_encoder(intel_connector, intel_encoder);
5452 drm_connector_register(connector);
5455 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5457 intel_connector->get_hw_state = intel_connector_get_hw_state;
5458 intel_connector->unregister = intel_dp_connector_unregister;
5460 /* Set up the hotplug pin. */
5463 intel_encoder->hpd_pin = HPD_PORT_A;
5466 intel_encoder->hpd_pin = HPD_PORT_B;
5469 intel_encoder->hpd_pin = HPD_PORT_C;
5472 intel_encoder->hpd_pin = HPD_PORT_D;
5478 if (is_edp(intel_dp)) {
5480 intel_dp_init_panel_power_timestamps(intel_dp);
5481 if (IS_VALLEYVIEW(dev))
5482 vlv_initial_power_sequencer_setup(intel_dp);
5484 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5485 pps_unlock(intel_dp);
5488 intel_dp_aux_init(intel_dp, intel_connector);
5490 /* init MST on ports that can support it */
5491 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5492 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5493 intel_dp_mst_encoder_init(intel_dig_port,
5494 intel_connector->base.base.id);
5498 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5499 drm_dp_aux_unregister(&intel_dp->aux);
5500 if (is_edp(intel_dp)) {
5501 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5503 * vdd might still be enabled do to the delayed vdd off.
5504 * Make sure vdd is actually turned off here.
5507 edp_panel_vdd_off_sync(intel_dp);
5508 pps_unlock(intel_dp);
5510 drm_connector_unregister(connector);
5511 drm_connector_cleanup(connector);
5515 intel_dp_add_properties(intel_dp, connector);
5517 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5518 * 0xd. Failure to do so will result in spurious interrupts being
5519 * generated on the port when a cable is not attached.
5521 if (IS_G4X(dev) && !IS_GM45(dev)) {
5522 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5523 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5530 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5532 struct drm_i915_private *dev_priv = dev->dev_private;
5533 struct intel_digital_port *intel_dig_port;
5534 struct intel_encoder *intel_encoder;
5535 struct drm_encoder *encoder;
5536 struct intel_connector *intel_connector;
5538 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5539 if (!intel_dig_port)
5542 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5543 if (!intel_connector) {
5544 kfree(intel_dig_port);
5548 intel_encoder = &intel_dig_port->base;
5549 encoder = &intel_encoder->base;
5551 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5552 DRM_MODE_ENCODER_TMDS);
5554 intel_encoder->compute_config = intel_dp_compute_config;
5555 intel_encoder->disable = intel_disable_dp;
5556 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5557 intel_encoder->get_config = intel_dp_get_config;
5558 intel_encoder->suspend = intel_dp_encoder_suspend;
5559 if (IS_CHERRYVIEW(dev)) {
5560 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5561 intel_encoder->pre_enable = chv_pre_enable_dp;
5562 intel_encoder->enable = vlv_enable_dp;
5563 intel_encoder->post_disable = chv_post_disable_dp;
5564 } else if (IS_VALLEYVIEW(dev)) {
5565 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5566 intel_encoder->pre_enable = vlv_pre_enable_dp;
5567 intel_encoder->enable = vlv_enable_dp;
5568 intel_encoder->post_disable = vlv_post_disable_dp;
5570 intel_encoder->pre_enable = g4x_pre_enable_dp;
5571 intel_encoder->enable = g4x_enable_dp;
5572 if (INTEL_INFO(dev)->gen >= 5)
5573 intel_encoder->post_disable = ilk_post_disable_dp;
5576 intel_dig_port->port = port;
5577 intel_dig_port->dp.output_reg = output_reg;
5579 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5580 if (IS_CHERRYVIEW(dev)) {
5582 intel_encoder->crtc_mask = 1 << 2;
5584 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5586 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5588 intel_encoder->cloneable = 0;
5589 intel_encoder->hot_plug = intel_dp_hot_plug;
5591 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5592 dev_priv->hpd_irq_port[port] = intel_dig_port;
5594 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5595 drm_encoder_cleanup(encoder);
5596 kfree(intel_dig_port);
5597 kfree(intel_connector);
5601 void intel_dp_mst_suspend(struct drm_device *dev)
5603 struct drm_i915_private *dev_priv = dev->dev_private;
5607 for (i = 0; i < I915_MAX_PORTS; i++) {
5608 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5609 if (!intel_dig_port)
5612 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5613 if (!intel_dig_port->dp.can_mst)
5615 if (intel_dig_port->dp.is_mst)
5616 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5621 void intel_dp_mst_resume(struct drm_device *dev)
5623 struct drm_i915_private *dev_priv = dev->dev_private;
5626 for (i = 0; i < I915_MAX_PORTS; i++) {
5627 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5628 if (!intel_dig_port)
5630 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5633 if (!intel_dig_port->dp.can_mst)
5636 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5638 intel_dp_check_mst_status(&intel_dig_port->dp);