drm/i915: Also record time difference if vblank evasion fails, v2.
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Maps a DP link rate (in kHz) to the DPLL divider values that produce it. */
struct dp_link_dpll {
        int clock;              /* link clock in kHz */
        struct dpll dpll;       /* divider settings for that clock */
};

/* DPLL settings for the two gen4 DP link rates */
static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-attached DP ports */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for VLV DP ports */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
93
/*
 * Per-platform source link rates, in kHz.
 * NOTE(review): presumably intersected with the sink's supported rates
 * elsewhere in this file when choosing the link clock -- confirm.
 */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
                                 243000, 270000, 324000, 405000,
                                 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
102
103 /**
104  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105  * @intel_dp: DP struct
106  *
107  * If a CPU or PCH DP output is attached to an eDP panel, this function
108  * will return true, and false otherwise.
109  */
110 static bool is_edp(struct intel_dp *intel_dp)
111 {
112         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
115 }
116
117 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
118 {
119         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121         return intel_dig_port->base.base.dev;
122 }
123
/* Return the intel_dp hanging off the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
128
129 static void intel_dp_link_down(struct intel_dp *intel_dp);
130 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
131 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
132 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133 static void vlv_steal_power_sequencer(struct drm_device *dev,
134                                       enum pipe pipe);
135
/*
 * Build a 4-bit mask of the lanes NOT used by a link of @lane_count
 * lanes (bit 0 = lane 0 ... bit 3 = lane 3).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1 << lane_count) - 1;

	return ~used_lanes & 0xf;
}
140
141 static int
142 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
143 {
144         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145
146         switch (max_link_bw) {
147         case DP_LINK_BW_1_62:
148         case DP_LINK_BW_2_7:
149         case DP_LINK_BW_5_4:
150                 break;
151         default:
152                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
153                      max_link_bw);
154                 max_link_bw = DP_LINK_BW_1_62;
155                 break;
156         }
157         return max_link_bw;
158 }
159
/*
 * Lane count to use: the minimum of what the source port and the sink
 * (per its DPCD receiver caps) support.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        u8 source_max, sink_max;

        /* Source side: 4 lanes, unless DDI port A was strapped to 2 lanes */
        source_max = 4;
        if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
            (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
                source_max = 2;

        /* Sink side: advertised in the DPCD */
        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

        return min(source_max, sink_max);
}
175
176 /*
177  * The units on the numbers in the next two are... bizarre.  Examples will
178  * make it clearer; this one parallels an example in the eDP spec.
179  *
180  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181  *
182  *     270000 * 1 * 8 / 10 == 216000
183  *
184  * The actual data capacity of that configuration is 2.16Gbit/s, so the
185  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
186  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187  * 119000.  At 18bpp that's 2142000 kilobits per second.
188  *
189  * Thus the strange-looking division by 10 in intel_dp_link_required, to
190  * get the result in decakilobits instead of kilobits.
191  */
192
/*
 * Bandwidth required by @pixel_clock (kHz) at @bpp bits per pixel, in
 * decakilobits per second (see the units discussion above). Rounds up
 * so the link is never under-provisioned.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
198
/*
 * Maximum payload the link can carry, in decakilobits per second:
 * 8b/10b channel coding leaves 8 data bits per 10-bit symbol.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total_symbol_rate = max_link_clock * max_lanes;

	return total_symbol_rate * 8 / 10;
}
204
/*
 * Validate @mode against the panel's fixed mode (for eDP) and the
 * available link bandwidth at 18bpp.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        /* eDP panels can't go beyond their fixed mode's resolution */
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                /* cost the link for the fixed mode we'd actually drive */
                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        /* 18bpp assumed here -- NOTE(review): lowest bpp fallback, confirm */
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        /* double-clocked modes are not supported on DP */
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
242
/*
 * Pack up to 4 message bytes into one 32-bit word, most significant
 * byte first, as the AUX channel data registers expect. Extra bytes
 * beyond 4 are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int count = src_bytes > 4 ? 4 : src_bytes;
	uint32_t packed = 0;
	int byte;

	for (byte = 0; byte < count; byte++)
		packed |= (uint32_t)src[byte] << (8 * (3 - byte));

	return packed;
}
254
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes, most
 * significant byte first. At most 4 bytes are ever written to @dst.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int byte;

	for (byte = 0; byte < count; byte++)
		dst[byte] = (uint8_t)(src >> (8 * (3 - byte)));
}
263
264 static void
265 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
266                                     struct intel_dp *intel_dp);
267 static void
268 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
269                                               struct intel_dp *intel_dp);
270
/*
 * Take pps_mutex. A power domain reference is taken first (and released
 * after pps_unlock()) -- the get/put must happen while NOT holding
 * pps_mutex; see the comment in vlv_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
288
/*
 * Drop pps_mutex, then release the power domain reference taken in
 * pps_lock() (the put must be done while not holding the mutex).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
302
/*
 * Make the power sequencer of intel_dp->pps_pipe lock on to this port
 * by briefly enabling and disabling the port on that pipe. The pipe's
 * DPLL is forced on temporarily if it isn't already running. Bails
 * (with a WARN) if the port is currently enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        /* minimal port config: one lane, lowest drive levels, pattern 1 */
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable temporarily it if it's not already enabled.
         */
        if (!pll_enabled) {
                /* on CHV the PHY channel may need powering up as well */
                release_cl_override = IS_CHERRYVIEW(dev) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        /* undo the temporary PLL (and CHV PHY) force-on */
        if (!pll_enabled) {
                vlv_force_pll_off(dev, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}
372
/*
 * Return the pipe whose power sequencer is driving this eDP port,
 * assigning (stealing if necessary) and kicking a free one if the port
 * doesn't have one yet. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        /* already assigned? nothing to do */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
436
437 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
438                                enum pipe pipe);
439
440 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
441                                enum pipe pipe)
442 {
443         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
444 }
445
446 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
447                                 enum pipe pipe)
448 {
449         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
450 }
451
/* Accept any pipe -- the last-resort vlv_pipe_check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
457
458 static enum pipe
459 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
460                      enum port port,
461                      vlv_pipe_check pipe_check)
462 {
463         enum pipe pipe;
464
465         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
466                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
467                         PANEL_PORT_SELECT_MASK;
468
469                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
470                         continue;
471
472                 if (!pipe_check(dev_priv, pipe))
473                         continue;
474
475                 return pipe;
476         }
477
478         return INVALID_PIPE;
479 }
480
/*
 * At init time, discover which pipe's power sequencer (if any) the
 * BIOS left driving this port, preferring one that's actively powering
 * the panel. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
517
/*
 * Invalidate the cached pps_pipe of every eDP port so that an
 * assignment is re-discovered the next time a power sequencer is
 * needed. VLV only (WARNs otherwise).
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
546
547 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
548 {
549         struct drm_device *dev = intel_dp_to_dev(intel_dp);
550
551         if (IS_BROXTON(dev))
552                 return BXT_PP_CONTROL(0);
553         else if (HAS_PCH_SPLIT(dev))
554                 return PCH_PP_CONTROL;
555         else
556                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
557 }
558
559 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
560 {
561         struct drm_device *dev = intel_dp_to_dev(intel_dp);
562
563         if (IS_BROXTON(dev))
564                 return BXT_PP_STATUS(0);
565         else if (HAS_PCH_SPLIT(dev))
566                 return PCH_PP_STATUS;
567         else
568                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
569 }
570
/*
 * Reboot notifier: shut down panel power on restart so the panel's T12
 * power-cycle delay is honoured across the reboot. Only applicable when
 * the panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* only eDP, and only on an actual restart */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* wait out the power cycle delay before letting reboot proceed */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
606
607 static bool edp_have_panel_power(struct intel_dp *intel_dp)
608 {
609         struct drm_device *dev = intel_dp_to_dev(intel_dp);
610         struct drm_i915_private *dev_priv = dev->dev_private;
611
612         lockdep_assert_held(&dev_priv->pps_mutex);
613
614         if (IS_VALLEYVIEW(dev) &&
615             intel_dp->pps_pipe == INVALID_PIPE)
616                 return false;
617
618         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
619 }
620
621 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
622 {
623         struct drm_device *dev = intel_dp_to_dev(intel_dp);
624         struct drm_i915_private *dev_priv = dev->dev_private;
625
626         lockdep_assert_held(&dev_priv->pps_mutex);
627
628         if (IS_VALLEYVIEW(dev) &&
629             intel_dp->pps_pipe == INVALID_PIPE)
630                 return false;
631
632         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
633 }
634
635 static void
636 intel_dp_check_edp(struct intel_dp *intel_dp)
637 {
638         struct drm_device *dev = intel_dp_to_dev(intel_dp);
639         struct drm_i915_private *dev_priv = dev->dev_private;
640
641         if (!is_edp(intel_dp))
642                 return;
643
644         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
645                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
646                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
647                               I915_READ(_pp_stat_reg(intel_dp)),
648                               I915_READ(_pp_ctrl_reg(intel_dp)));
649         }
650 }
651
/*
 * Wait for the in-flight AUX transfer to finish (SEND_BUSY to clear),
 * via the AUX irq + gmbus_wait_queue when available, else by polling.
 * Returns the last value read from the channel control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads the control register on every evaluation of the condition */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
675
676 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
677 {
678         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
679         struct drm_device *dev = intel_dig_port->base.base.dev;
680
681         /*
682          * The clock divider is based off the hrawclk, and would like to run at
683          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
684          */
685         return index ? 0 : intel_hrawclk(dev) / 2;
686 }
687
688 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
689 {
690         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
691         struct drm_device *dev = intel_dig_port->base.base.dev;
692         struct drm_i915_private *dev_priv = dev->dev_private;
693
694         if (index)
695                 return 0;
696
697         if (intel_dig_port->port == PORT_A) {
698                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
699
700         } else {
701                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
702         }
703 }
704
/*
 * AUX clock divider selection: derived from cdclk on port A, a
 * hardcoded two-entry table on a non-ULT LPT PCH (workaround), and the
 * PCH rawclk otherwise. Returns 0 when no more dividers are left to try.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else  {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}
726
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/* single fixed divider of 100; nothing further to retry with */
	if (index)
		return 0;

	return 100;
}
731
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	if (index)
		return 0;

	return 1;
}
741
/*
 * Build the AUX_CH_CTL value for a transfer of @send_bytes: picks the
 * gen-specific precharge time, a longer timeout for BDW port A, and
 * folds in the caller-supplied clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        /* gen6 uses a shorter precharge than later gens */
        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        /* BDW port A (eDP) needs the longer 600us timeout */
        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
771
772 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
773                                       bool has_aux_irq,
774                                       int send_bytes,
775                                       uint32_t unused)
776 {
777         return DP_AUX_CH_CTL_SEND_BUSY |
778                DP_AUX_CH_CTL_DONE |
779                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
780                DP_AUX_CH_CTL_TIME_OUT_ERROR |
781                DP_AUX_CH_CTL_TIME_OUT_1600us |
782                DP_AUX_CH_CTL_RECEIVE_ERROR |
783                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
785 }
786
/*
 * intel_dp_aux_ch - perform one raw AUX channel transaction
 * @intel_dp: port to talk through
 * @send: bytes to transmit (header plus payload)
 * @send_bytes: number of bytes in @send, at most 20 (only 5 data regs)
 * @recv: buffer for the reply
 * @recv_size: capacity of @recv, at most 20
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never went idle / never signalled done,
 * -E2BIG for oversized transfers, -EIO on receive errors, and
 * -ETIMEDOUT when the sink did not answer (typically: not connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* Data registers immediately follow the control register. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* WARN only once per distinct stuck status so a wedged
		 * channel does not spam the log on every transfer. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Walk the platform's list of AUX clock dividers until one works. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Release everything in reverse order of acquisition. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
941
942 #define BARE_ADDRESS_SIZE       3
943 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * intel_dp_aux_transfer - drm_dp_aux .transfer hook
 *
 * Packs @msg into the 4-byte AUX header format, performs the raw
 * transfer via intel_dp_aux_ch() and decodes the reply.  Returns the
 * payload size on success or a negative errno.  Callers are expected
 * to inspect msg->reply (ACK/NACK/DEFER) before trusting the data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* AUX header: 4-bit request, 20-bit address, length minus one. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* Zero-sized writes are address-only transactions. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply code in rxbuf[0]. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1012
/*
 * intel_dp_aux_init - set up the AUX channel and register the DDC bus
 * @intel_dp: port being initialized
 * @connector: connector the DDC device is linked under in sysfs
 *
 * Picks the AUX_CH_CTL register for the port, registers the drm_dp_aux
 * helper and links the resulting DDC device into the connector's sysfs
 * directory.  Errors are logged but not propagated.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			/* Fall back to AUX A for absent/bogus VBT values. */
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC device under the connector's sysfs directory;
	 * undo the aux registration if the link cannot be created. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1104
/*
 * Remove the sysfs DDC link created by intel_dp_aux_init() and
 * unregister the connector.  MST connectors skip the link removal —
 * presumably they never had one created; confirm against the MST
 * connector init path.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1115
1116 static void
1117 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1118 {
1119         u32 ctrl1;
1120
1121         memset(&pipe_config->dpll_hw_state, 0,
1122                sizeof(pipe_config->dpll_hw_state));
1123
1124         pipe_config->ddi_pll_sel = SKL_DPLL0;
1125         pipe_config->dpll_hw_state.cfgcr1 = 0;
1126         pipe_config->dpll_hw_state.cfgcr2 = 0;
1127
1128         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1129         switch (pipe_config->port_clock / 2) {
1130         case 81000:
1131                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1132                                               SKL_DPLL0);
1133                 break;
1134         case 135000:
1135                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1136                                               SKL_DPLL0);
1137                 break;
1138         case 270000:
1139                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1140                                               SKL_DPLL0);
1141                 break;
1142         case 162000:
1143                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1144                                               SKL_DPLL0);
1145                 break;
1146         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1147         results in CDCLK change. Need to handle the change of CDCLK by
1148         disabling pipes and re-enabling them */
1149         case 108000:
1150                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1151                                               SKL_DPLL0);
1152                 break;
1153         case 216000:
1154                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1155                                               SKL_DPLL0);
1156                 break;
1157
1158         }
1159         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1160 }
1161
1162 static void
1163 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1164 {
1165         memset(&pipe_config->dpll_hw_state, 0,
1166                sizeof(pipe_config->dpll_hw_state));
1167
1168         switch (pipe_config->port_clock / 2) {
1169         case 81000:
1170                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1171                 break;
1172         case 135000:
1173                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1174                 break;
1175         case 270000:
1176                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1177                 break;
1178         }
1179 }
1180
1181 static int
1182 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1183 {
1184         if (intel_dp->num_sink_rates) {
1185                 *sink_rates = intel_dp->sink_rates;
1186                 return intel_dp->num_sink_rates;
1187         }
1188
1189         *sink_rates = default_rates;
1190
1191         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1192 }
1193
1194 static int
1195 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1196 {
1197         if (IS_BROXTON(dev)) {
1198                 *source_rates = bxt_rates;
1199                 return ARRAY_SIZE(bxt_rates);
1200         } else if (IS_SKYLAKE(dev)) {
1201                 *source_rates = skl_rates;
1202                 return ARRAY_SIZE(skl_rates);
1203         } else if (IS_CHERRYVIEW(dev)) {
1204                 *source_rates = chv_rates;
1205                 return ARRAY_SIZE(chv_rates);
1206         }
1207
1208         *source_rates = default_rates;
1209
1210         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1211                 /* WaDisableHBR2:skl */
1212                 return (DP_LINK_BW_2_7 >> 3) + 1;
1213         else if (INTEL_INFO(dev)->gen >= 8 ||
1214             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1215                 return (DP_LINK_BW_5_4 >> 3) + 1;
1216         else
1217                 return (DP_LINK_BW_2_7 >> 3) + 1;
1218 }
1219
1220 static void
1221 intel_dp_set_clock(struct intel_encoder *encoder,
1222                    struct intel_crtc_state *pipe_config)
1223 {
1224         struct drm_device *dev = encoder->base.dev;
1225         const struct dp_link_dpll *divisor = NULL;
1226         int i, count = 0;
1227
1228         if (IS_G4X(dev)) {
1229                 divisor = gen4_dpll;
1230                 count = ARRAY_SIZE(gen4_dpll);
1231         } else if (HAS_PCH_SPLIT(dev)) {
1232                 divisor = pch_dpll;
1233                 count = ARRAY_SIZE(pch_dpll);
1234         } else if (IS_CHERRYVIEW(dev)) {
1235                 divisor = chv_dpll;
1236                 count = ARRAY_SIZE(chv_dpll);
1237         } else if (IS_VALLEYVIEW(dev)) {
1238                 divisor = vlv_dpll;
1239                 count = ARRAY_SIZE(vlv_dpll);
1240         }
1241
1242         if (divisor && count) {
1243                 for (i = 0; i < count; i++) {
1244                         if (pipe_config->port_clock == divisor[i].clock) {
1245                                 pipe_config->dpll = divisor[i].dpll;
1246                                 pipe_config->clock_set = true;
1247                                 break;
1248                         }
1249                 }
1250         }
1251 }
1252
1253 static int intersect_rates(const int *source_rates, int source_len,
1254                            const int *sink_rates, int sink_len,
1255                            int *common_rates)
1256 {
1257         int i = 0, j = 0, k = 0;
1258
1259         while (i < source_len && j < sink_len) {
1260                 if (source_rates[i] == sink_rates[j]) {
1261                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1262                                 return k;
1263                         common_rates[k] = source_rates[i];
1264                         ++k;
1265                         ++i;
1266                         ++j;
1267                 } else if (source_rates[i] < sink_rates[j]) {
1268                         ++i;
1269                 } else {
1270                         ++j;
1271                 }
1272         }
1273         return k;
1274 }
1275
/*
 * Compute the set of link rates supported by both this source and the
 * attached sink.  Returns the number of entries written into
 * @common_rates.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int src_len, snk_len;

	src_len = intel_dp_source_rates(dev, &src_rates);
	snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);

	return intersect_rates(src_rates, src_len,
			       snk_rates, snk_len,
			       common_rates);
}
1290
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list ("162000, 270000, ...").  Output stops silently once the buffer
 * is full; @str is always NUL-terminated (@len must be > 0).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		const char *sep = i ? ", " : "";
		int written = snprintf(str, len, "%s%d", sep, array[i]);

		/* Stop on truncation (a negative snprintf error also
		 * compares as a huge unsigned value here). */
		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1306
/*
 * Dump the source, sink and common link rates to the KMS debug log.
 * Returns early unless KMS debugging is enabled, since formatting the
 * strings would otherwise be pure overhead.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Cheap early-out when nobody would see the output. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1330
1331 static int rate_to_index(int find, const int *rates)
1332 {
1333         int i = 0;
1334
1335         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1336                 if (find == rates[i])
1337                         break;
1338
1339         return i;
1340 }
1341
/*
 * intel_dp_max_link_rate - highest link rate common to source and sink
 *
 * Relies on rates[] being zero-initialized: rate_to_index(0, rates)
 * lands on the first unused slot (or DP_MAX_SUPPORTED_RATES when the
 * array is completely full), so the entry just before it is the last —
 * i.e. highest — common rate.  Falls back to 162000 (RBR) with a WARN
 * if there is no common rate at all.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
1354
/*
 * Map @rate to its index in the sink's rate table; used as the
 * rate-select value for sinks that expose an explicit table (see
 * intel_dp_compute_rate()).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1359
1360 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1361                                   uint8_t *link_bw, uint8_t *rate_select)
1362 {
1363         if (intel_dp->num_sink_rates) {
1364                 *link_bw = 0;
1365                 *rate_select =
1366                         intel_dp_rate_select(intel_dp, port_clock);
1367         } else {
1368                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1369                 *rate_select = 0;
1370         }
1371 }
1372
1373 bool
1374 intel_dp_compute_config(struct intel_encoder *encoder,
1375                         struct intel_crtc_state *pipe_config)
1376 {
1377         struct drm_device *dev = encoder->base.dev;
1378         struct drm_i915_private *dev_priv = dev->dev_private;
1379         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1380         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1381         enum port port = dp_to_dig_port(intel_dp)->port;
1382         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1383         struct intel_connector *intel_connector = intel_dp->attached_connector;
1384         int lane_count, clock;
1385         int min_lane_count = 1;
1386         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1387         /* Conveniently, the link BW constants become indices with a shift...*/
1388         int min_clock = 0;
1389         int max_clock;
1390         int bpp, mode_rate;
1391         int link_avail, link_clock;
1392         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1393         int common_len;
1394         uint8_t link_bw, rate_select;
1395
1396         common_len = intel_dp_common_rates(intel_dp, common_rates);
1397
1398         /* No common link rates between source and sink */
1399         WARN_ON(common_len <= 0);
1400
1401         max_clock = common_len - 1;
1402
1403         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1404                 pipe_config->has_pch_encoder = true;
1405
1406         pipe_config->has_dp_encoder = true;
1407         pipe_config->has_drrs = false;
1408         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1409
1410         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1411                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1412                                        adjusted_mode);
1413
1414                 if (INTEL_INFO(dev)->gen >= 9) {
1415                         int ret;
1416                         ret = skl_update_scaler_crtc(pipe_config);
1417                         if (ret)
1418                                 return ret;
1419                 }
1420
1421                 if (!HAS_PCH_SPLIT(dev))
1422                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1423                                                  intel_connector->panel.fitting_mode);
1424                 else
1425                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1426                                                 intel_connector->panel.fitting_mode);
1427         }
1428
1429         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1430                 return false;
1431
1432         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1433                       "max bw %d pixel clock %iKHz\n",
1434                       max_lane_count, common_rates[max_clock],
1435                       adjusted_mode->crtc_clock);
1436
1437         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1438          * bpc in between. */
1439         bpp = pipe_config->pipe_bpp;
1440         if (is_edp(intel_dp)) {
1441
1442                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1443                 if (intel_connector->base.display_info.bpc == 0 &&
1444                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1445                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1446                                       dev_priv->vbt.edp_bpp);
1447                         bpp = dev_priv->vbt.edp_bpp;
1448                 }
1449
1450                 /*
1451                  * Use the maximum clock and number of lanes the eDP panel
1452                  * advertizes being capable of. The panels are generally
1453                  * designed to support only a single clock and lane
1454                  * configuration, and typically these values correspond to the
1455                  * native resolution of the panel.
1456                  */
1457                 min_lane_count = max_lane_count;
1458                 min_clock = max_clock;
1459         }
1460
1461         for (; bpp >= 6*3; bpp -= 2*3) {
1462                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1463                                                    bpp);
1464
1465                 for (clock = min_clock; clock <= max_clock; clock++) {
1466                         for (lane_count = min_lane_count;
1467                                 lane_count <= max_lane_count;
1468                                 lane_count <<= 1) {
1469
1470                                 link_clock = common_rates[clock];
1471                                 link_avail = intel_dp_max_data_rate(link_clock,
1472                                                                     lane_count);
1473
1474                                 if (mode_rate <= link_avail) {
1475                                         goto found;
1476                                 }
1477                         }
1478                 }
1479         }
1480
1481         return false;
1482
1483 found:
1484         if (intel_dp->color_range_auto) {
1485                 /*
1486                  * See:
1487                  * CEA-861-E - 5.1 Default Encoding Parameters
1488                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1489                  */
1490                 pipe_config->limited_color_range =
1491                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1492         } else {
1493                 pipe_config->limited_color_range =
1494                         intel_dp->limited_color_range;
1495         }
1496
1497         pipe_config->lane_count = lane_count;
1498
1499         pipe_config->pipe_bpp = bpp;
1500         pipe_config->port_clock = common_rates[clock];
1501
1502         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1503                               &link_bw, &rate_select);
1504
1505         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1506                       link_bw, rate_select, pipe_config->lane_count,
1507                       pipe_config->port_clock, bpp);
1508         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1509                       mode_rate, link_avail);
1510
1511         intel_link_compute_m_n(bpp, lane_count,
1512                                adjusted_mode->crtc_clock,
1513                                pipe_config->port_clock,
1514                                &pipe_config->dp_m_n);
1515
1516         if (intel_connector->panel.downclock_mode != NULL &&
1517                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1518                         pipe_config->has_drrs = true;
1519                         intel_link_compute_m_n(bpp, lane_count,
1520                                 intel_connector->panel.downclock_mode->clock,
1521                                 pipe_config->port_clock,
1522                                 &pipe_config->dp_m2_n2);
1523         }
1524
1525         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1526                 skl_edp_set_pll_config(pipe_config);
1527         else if (IS_BROXTON(dev))
1528                 /* handled in ddi */;
1529         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1530                 hsw_dp_set_ddi_pll_sel(pipe_config);
1531         else
1532                 intel_dp_set_clock(encoder, pipe_config);
1533
1534         return true;
1535 }
1536
/*
 * Select the eDP PLL frequency (160 vs 270 MHz) in the DP_A register
 * for CPU eDP, mirroring the chosen bit into the cached intel_dp->DP
 * value so later writes of that register preserve the selection.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	/* Read-modify-write: only the PLL frequency field changes. */
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Delay after the write — presumably PLL settle time; confirm
	 * against the hardware programming notes. */
	udelay(500);
}
1567
/*
 * Cache the link rate and lane count computed for @pipe_config on
 * @intel_dp itself.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1574
/*
 * Compute the value for the DP port register and stash it in
 * intel_dp->DP. The register itself is written later during the enable
 * sequence; only the CPT TRANS_DP_CTL register is programmed here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, crtc->config);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev) && port == PORT_A) {
                /* IVB CPU eDP: sync polarity and training bits live here */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* pipe select field at bit 29 — presumably IVB CPU eDP
                 * specific; confirm against bspec */
                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                /* On CPT only the enhanced framing bit is updated here;
                 * the rest of TRANS_DP_CTL is set up in ironlake_pch_enable */
                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
                    crtc->config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* CHV has a three-valued pipe select; older parts only
                 * distinguish pipe B via a single bit */
                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1659
/*
 * PP_STATUS mask/value pairs for wait_panel_status(): the panel power
 * sequencer has reached the "fully on", "fully off" or "power cycle
 * complete" state once (PP_STATUS & MASK) == VALUE for the matching pair.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1668
1669 static void wait_panel_status(struct intel_dp *intel_dp,
1670                                        u32 mask,
1671                                        u32 value)
1672 {
1673         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1674         struct drm_i915_private *dev_priv = dev->dev_private;
1675         u32 pp_stat_reg, pp_ctrl_reg;
1676
1677         lockdep_assert_held(&dev_priv->pps_mutex);
1678
1679         pp_stat_reg = _pp_stat_reg(intel_dp);
1680         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1681
1682         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1683                         mask, value,
1684                         I915_READ(pp_stat_reg),
1685                         I915_READ(pp_ctrl_reg));
1686
1687         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1688                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1689                                 I915_READ(pp_stat_reg),
1690                                 I915_READ(pp_ctrl_reg));
1691         }
1692
1693         DRM_DEBUG_KMS("Wait complete\n");
1694 }
1695
/* Block until the power sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1701
/* Block until the power sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1707
/*
 * Wait out the panel power cycle delay: first the remainder of the
 * software-tracked delay since the last power-off, then whatever cycle
 * delay the power sequencer itself still reports as active.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1719
/* Wait out the remaining panel-power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1725
/* Wait out the remaining backlight-off -> panel-power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1731
1732 /* Read the current pp_control value, unlocking the register if it
1733  * is locked
1734  */
1735
1736 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1737 {
1738         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1739         struct drm_i915_private *dev_priv = dev->dev_private;
1740         u32 control;
1741
1742         lockdep_assert_held(&dev_priv->pps_mutex);
1743
1744         control = I915_READ(_pp_ctrl_reg(intel_dp));
1745         if (!IS_BROXTON(dev)) {
1746                 control &= ~PANEL_UNLOCK_MASK;
1747                 control |= PANEL_UNLOCK_REGS;
1748         }
1749         return control;
1750 }
1751
/*
 * Force panel VDD on so the AUX channel and panel logic are usable even
 * while panel power proper is off.
 *
 * Returns true if VDD was not already requested, i.e. the caller is the
 * one who must eventually balance this with a VDD off.
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        /* Record the claim state before we take it over below. */
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        /* Stop any pending delayed VDD-off from racing with us. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* VDD already forced on in hardware: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Hold the port's power domain for as long as VDD is forced;
         * released again in edp_panel_vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
1809
1810 /*
1811  * Must be paired with intel_edp_panel_vdd_off() or
1812  * intel_edp_panel_off().
1813  * Nested calls to these functions are not allowed since
1814  * we drop the lock. Caller must use some higher level
1815  * locking to prevent nested calls from other threads.
1816  */
1817 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1818 {
1819         bool vdd;
1820
1821         if (!is_edp(intel_dp))
1822                 return;
1823
1824         pps_lock(intel_dp);
1825         vdd = edp_panel_vdd_on(intel_dp);
1826         pps_unlock(intel_dp);
1827
1828         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1829              port_name(dp_to_dig_port(intel_dp)->port));
1830 }
1831
/*
 * Immediately drop the VDD force bit and release the power domain
 * reference taken in edp_panel_vdd_on(). Caller must hold pps_mutex and
 * must have cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if hardware already has VDD off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* If panel power was also off, this VDD drop starts a new power
         * cycle; remember when it began for wait_panel_power_cycle(). */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1872
1873 static void edp_panel_vdd_work(struct work_struct *__work)
1874 {
1875         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1876                                                  struct intel_dp, panel_vdd_work);
1877
1878         pps_lock(intel_dp);
1879         if (!intel_dp->want_panel_vdd)
1880                 edp_panel_vdd_off_sync(intel_dp);
1881         pps_unlock(intel_dp);
1882 }
1883
1884 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1885 {
1886         unsigned long delay;
1887
1888         /*
1889          * Queue the timer to fire a long time from now (relative to the power
1890          * down delay) to keep the panel power up across a sequence of
1891          * operations.
1892          */
1893         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1894         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1895 }
1896
1897 /*
1898  * Must be paired with edp_panel_vdd_on().
1899  * Must hold pps_mutex around the whole on/off sequence.
1900  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1901  */
1902 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1903 {
1904         struct drm_i915_private *dev_priv =
1905                 intel_dp_to_dev(intel_dp)->dev_private;
1906
1907         lockdep_assert_held(&dev_priv->pps_mutex);
1908
1909         if (!is_edp(intel_dp))
1910                 return;
1911
1912         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1913              port_name(dp_to_dig_port(intel_dp)->port));
1914
1915         intel_dp->want_panel_vdd = false;
1916
1917         if (sync)
1918                 edp_panel_vdd_off_sync(intel_dp);
1919         else
1920                 edp_panel_vdd_schedule_off(intel_dp);
1921 }
1922
/*
 * Turn panel power on via the power sequencer. Caller must hold
 * pps_mutex. Includes the ILK (gen5) workaround that disables the panel
 * reset bit around the power-on sequence.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        /* Respect the minimum off time before powering back up. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay bookkeeping. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1970
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
1980
1981
/*
 * Turn panel power off via the power sequencer. Requires an active VDD
 * claim (warns otherwise), which this function consumes: it clears
 * want_panel_vdd and releases the power domain reference that the VDD
 * on path acquired. Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start of the power cycle delay, consumed by
         * wait_panel_power_cycle() on the next power-on. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
2023
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2033
2034 /* Enable backlight in the panel power control. */
2035 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2036 {
2037         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2038         struct drm_device *dev = intel_dig_port->base.base.dev;
2039         struct drm_i915_private *dev_priv = dev->dev_private;
2040         u32 pp;
2041         u32 pp_ctrl_reg;
2042
2043         /*
2044          * If we enable the backlight right away following a panel power
2045          * on, we may see slight flicker as the panel syncs with the eDP
2046          * link.  So delay a bit to make sure the image is solid before
2047          * allowing it to appear.
2048          */
2049         wait_backlight_on(intel_dp);
2050
2051         pps_lock(intel_dp);
2052
2053         pp = ironlake_get_pp_control(intel_dp);
2054         pp |= EDP_BLC_ENABLE;
2055
2056         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2057
2058         I915_WRITE(pp_ctrl_reg, pp);
2059         POSTING_READ(pp_ctrl_reg);
2060
2061         pps_unlock(intel_dp);
2062 }
2063
/* Enable backlight PWM and backlight PP control.
 * Note the order: the PWM side is brought up first, then the panel
 * power control backlight enable bit. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
2075
2076 /* Disable backlight in the panel power control. */
2077 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2078 {
2079         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2080         struct drm_i915_private *dev_priv = dev->dev_private;
2081         u32 pp;
2082         u32 pp_ctrl_reg;
2083
2084         if (!is_edp(intel_dp))
2085                 return;
2086
2087         pps_lock(intel_dp);
2088
2089         pp = ironlake_get_pp_control(intel_dp);
2090         pp &= ~EDP_BLC_ENABLE;
2091
2092         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2093
2094         I915_WRITE(pp_ctrl_reg, pp);
2095         POSTING_READ(pp_ctrl_reg);
2096
2097         pps_unlock(intel_dp);
2098
2099         intel_dp->last_backlight_off = jiffies;
2100         edp_wait_backlight_off(intel_dp);
2101 }
2102
/* Disable backlight PP control and backlight PWM.
 * Mirror image of intel_edp_backlight_on(): the panel power control
 * backlight bit goes first, then the PWM side is shut down. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2114
2115 /*
2116  * Hook for controlling the panel power control backlight through the bl_power
2117  * sysfs attribute. Take care to handle multiple calls.
2118  */
2119 static void intel_edp_backlight_power(struct intel_connector *connector,
2120                                       bool enable)
2121 {
2122         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2123         bool is_enabled;
2124
2125         pps_lock(intel_dp);
2126         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2127         pps_unlock(intel_dp);
2128
2129         if (is_enabled == enable)
2130                 return;
2131
2132         DRM_DEBUG_KMS("panel power control backlight %s\n",
2133                       enable ? "enable" : "disable");
2134
2135         if (enable)
2136                 _intel_edp_backlight_on(intel_dp);
2137         else
2138                 _intel_edp_backlight_off(intel_dp);
2139 }
2140
/*
 * Enable the CPU eDP PLL (DP_A). The pipe must be disabled, the PLL must
 * currently be off and the port disabled; warns if not.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Give the PLL time to settle before anything uses it. */
        udelay(200);
}
2166
/*
 * Disable the CPU eDP PLL (DP_A). The pipe must be disabled, the PLL
 * must currently be on and the port already disabled; warns if not.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Let the PLL wind down before subsequent programming. */
        udelay(200);
}
2191
2192 /* If the sink supports it, try to set the power state appropriately */
2193 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2194 {
2195         int ret, i;
2196
2197         /* Should have a valid DPCD by this point */
2198         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2199                 return;
2200
2201         if (mode != DRM_MODE_DPMS_ON) {
2202                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2203                                          DP_SET_POWER_D3);
2204         } else {
2205                 /*
2206                  * When turning on, we need to retry for 1ms to give the sink
2207                  * time to wake up.
2208                  */
2209                 for (i = 0; i < 3; i++) {
2210                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2211                                                  DP_SET_POWER_D0);
2212                         if (ret == 1)
2213                                 break;
2214                         msleep(1);
2215                 }
2216         }
2217
2218         if (ret != 1)
2219                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2220                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2221 }
2222
2223 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2224                                   enum pipe *pipe)
2225 {
2226         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2227         enum port port = dp_to_dig_port(intel_dp)->port;
2228         struct drm_device *dev = encoder->base.dev;
2229         struct drm_i915_private *dev_priv = dev->dev_private;
2230         enum intel_display_power_domain power_domain;
2231         u32 tmp;
2232
2233         power_domain = intel_display_port_power_domain(encoder);
2234         if (!intel_display_power_is_enabled(dev_priv, power_domain))
2235                 return false;
2236
2237         tmp = I915_READ(intel_dp->output_reg);
2238
2239         if (!(tmp & DP_PORT_EN))
2240                 return false;
2241
2242         if (IS_GEN7(dev) && port == PORT_A) {
2243                 *pipe = PORT_TO_PIPE_CPT(tmp);
2244         } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2245                 enum pipe p;
2246
2247                 for_each_pipe(dev_priv, p) {
2248                         u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2249                         if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2250                                 *pipe = p;
2251                                 return true;
2252                         }
2253                 }
2254
2255                 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2256                               intel_dp->output_reg);
2257         } else if (IS_CHERRYVIEW(dev)) {
2258                 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2259         } else {
2260                 *pipe = PORT_TO_PIPE(tmp);
2261         }
2262
2263         return true;
2264 }
2265
/*
 * Read the current hardware state of this DP encoder back into
 * @pipe_config: sync polarity flags, audio, color range, lane count,
 * link M/N, port clock (for CPU eDP) and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* On CPT the sync polarities live in TRANS_DP_CTL; elsewhere they
         * are in the port register itself. */
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* CPU eDP: derive the port clock from the DP_A PLL frequency. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2353
/*
 * Common DP disable: audio and PSR off, then the eDP backlight/panel
 * shutdown sequence, with the port itself torn down early on pre-gen5.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2377
/*
 * ILK post-disable: take the link down after the pipe is off, and shut
 * off the CPU eDP PLL when this was port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;

        intel_dp_link_down(intel_dp);
        if (port == PORT_A)
                ironlake_edp_pll_off(intel_dp);
}
2387
/* VLV post-disable: just take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_dp_link_down(intel_dp);
}
2394
/*
 * Assert (@reset == true) or deassert the CHV DPIO data lane and clock
 * soft resets on both PCS channel halves. The second PCS pair is only
 * touched when more than two lanes are in use. Caller is expected to
 * hold sb_lock for the DPIO accesses.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
                                     bool reset)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = crtc->pipe;
        uint32_t val;

        /* Lane resets: DW0 on PCS01, then PCS23 if >2 lanes. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        if (reset)
                val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        else
                val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
                if (reset)
                        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
                else
                        val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
        }

        /* Clock soft reset: DW1 on PCS01, then PCS23 if >2 lanes. */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        if (reset)
                val &= ~DPIO_PCS_CLK_SOFT_RESET;
        else
                val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        if (crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
                val |= CHV_PCS_REQ_SOFTRESET_EN;
                if (reset)
                        val &= ~DPIO_PCS_CLK_SOFT_RESET;
                else
                        val |= DPIO_PCS_CLK_SOFT_RESET;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
        }
}
2438
2439 static void chv_post_disable_dp(struct intel_encoder *encoder)
2440 {
2441         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2442         struct drm_device *dev = encoder->base.dev;
2443         struct drm_i915_private *dev_priv = dev->dev_private;
2444
2445         intel_dp_link_down(intel_dp);
2446
2447         mutex_lock(&dev_priv->sb_lock);
2448
2449         /* Assert data lane reset */
2450         chv_data_lane_soft_reset(encoder, true);
2451
2452         mutex_unlock(&dev_priv->sb_lock);
2453 }
2454
/*
 * Program the requested DP link training pattern (and scrambling state,
 * where supported) for this port.
 *
 * On DDI platforms the pattern is written directly to DP_TP_CTL here.
 * On all other platforms only the caller-provided *DP value is updated;
 * the caller is responsible for writing it to the port register.
 *
 * @DP:           in/out copy of the DP port register value (non-DDI paths)
 * @dp_train_pat: DP_TRAINING_PATTERN_* optionally ORed with
 *                DP_LINK_SCRAMBLING_DISABLE
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: read-modify-write DP_TP_CTL directly */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                /* CPT-style link training fields in the port register */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* No TPS3 on this path; fall back to pattern 2 */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* Legacy/g4x-style fields; CHV has a wider mask (TPS3) */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports TPS3 here */
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2538
/*
 * Enable the DP port with training pattern 1 selected.  The port
 * register is deliberately written twice: once fully configured but
 * with the port still disabled, then again with DP_PORT_EN set (see
 * the comment below for why).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full configuration, port still disabled */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        /* Second write: same configuration, now with the port enabled */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2562
/*
 * Common DP enable sequence: bring up the port, run the panel power
 * sequence (eDP), wait for VLV/CHV PHY readiness, then link train and
 * optionally enable audio.  The ordering of these steps is significant.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        /* The port must not already be enabled at this point */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV/CHV: bind a power sequencer to this pipe/port first */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* Panel power on, bracketed by vdd so the AUX channel works */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                unsigned int lane_mask = 0x0;

                /* CHV: don't wait on lanes the config leaves unused */
                if (IS_CHERRYVIEW(dev))
                        lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        /* Wake the sink, then run the full link training sequence */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2608
2609 static void g4x_enable_dp(struct intel_encoder *encoder)
2610 {
2611         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2612
2613         intel_enable_dp(encoder);
2614         intel_edp_backlight_on(intel_dp);
2615 }
2616
2617 static void vlv_enable_dp(struct intel_encoder *encoder)
2618 {
2619         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2620
2621         intel_edp_backlight_on(intel_dp);
2622         intel_psr_enable(intel_dp);
2623 }
2624
2625 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2626 {
2627         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2628         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2629
2630         intel_dp_prepare(encoder);
2631
2632         /* Only ilk+ has port A */
2633         if (dport->port == PORT_A) {
2634                 ironlake_set_pll_cpu_edp(intel_dp);
2635                 ironlake_edp_pll_on(intel_dp);
2636         }
2637 }
2638
/*
 * Logically disconnect the power sequencer currently bound to this
 * eDP port: sync vdd off, clear the sequencer's port select, and mark
 * intel_dp->pps_pipe invalid.  Caller holds pps_mutex (callers in this
 * file assert it).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* vdd must be off before we release the sequencer */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2664
/*
 * Make sure no other eDP encoder is still using the power sequencer
 * belonging to @pipe: any encoder found holding it is detached first
 * (which also turns its vdd off).  Called with pps_mutex held.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Only pipes A and B have power sequencers on VLV/CHV */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                /* Only eDP encoders can own a power sequencer */
                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* Stealing from an encoder with an active crtc is a bug */
                WARN(encoder->base.crtc,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2701
/*
 * Bind the power sequencer of this encoder's pipe to the eDP port:
 * release any previously-used sequencer, steal the target pipe's
 * sequencer from other encoders if needed, then (re)initialize the
 * sequencer state and registers.  No-op for non-eDP or when already
 * bound to the right pipe.  Called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* Already using the right sequencer; nothing to do */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2742
/*
 * VLV pre-enable: program the PHY PCS registers for this channel via
 * DPIO sideband, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * the "val = 0" below, and the read targets VLV_PCS01_DW8 while
         * the write below goes to VLV_PCS_DW8 — confirm whether the read
         * is a required sideband access or simply dead code.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        /* Bit 21 is set for pipe B, cleared for pipe A */
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2771
/*
 * VLV pre-PLL-enable: prepare the port registers, then program the Tx
 * lane resets and skew fixups for this DPIO channel via sideband.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2801
/*
 * CHV pre-enable: program per-lane PHY settings (TX FIFO reset source,
 * lane latency "upar" bits, data lane stagger) via DPIO sideband,
 * deassert the data lane reset, then run the common DP enable.
 * Finally release the CH1 common lane override taken in
 * chv_dp_pre_pll_enable() once the lane can stay up on its own.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        /* Same for the second PCS group when >2 lanes are used */
        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
                if (intel_crtc->config->lane_count == 1)
                        data = 0x0;
                else
                        data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming: value scales with port clock */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
                val |= DPIO_TX2_STAGGER_MASK(0x1f);
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
        }

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        if (intel_crtc->config->lane_count > 2) {
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                               DPIO_LANESTAGGER_STRAP(stagger) |
                               DPIO_LANESTAGGER_STRAP_OVRD |
                               DPIO_TX1_STAGGER_MASK(0x1f) |
                               DPIO_TX1_STAGGER_MULT(7) |
                               DPIO_TX2_STAGGER_MULT(5));
        }

        /* Deassert data lane reset */
        chv_data_lane_soft_reset(encoder, false);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);

        /* Second common lane will stay alive on its own now */
        if (dport->release_cl2_override) {
                chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
                dport->release_cl2_override = false;
        }
}
2890
/*
 * CHV pre-PLL-enable: power up the PHY lanes (forcing the second
 * common lane on if needed, remembered in release_cl2_override for
 * chv_pre_enable_dp() to undo), assert the data lane reset, and
 * program clock distribution and clock channel usage for this
 * channel via DPIO sideband.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        unsigned int lane_mask =
                intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;

        intel_dp_prepare(encoder);

        /*
         * Must trick the second common lane into life.
         * Otherwise we can't even access the PLL.
         */
        if (ch == DPIO_CH0 && pipe == PIPE_B)
                dport->release_cl2_override =
                        !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

        chv_phy_powergate_lanes(encoder, true, lane_mask);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, true);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        /* Second PCS group needs the same clock channel setting */
        if (intel_crtc->config->lane_count > 2) {
                val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
                val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
                if (pipe != PIPE_B)
                        val &= ~CHV_PCS_USEDCLKCHANNEL;
                else
                        val |= CHV_PCS_USEDCLKCHANNEL;
                vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
        }

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
2973
/*
 * CHV post-PLL-disable: undo the clock distribution forcing done in
 * chv_dp_pre_pll_enable() and drop the lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* disable left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Leave the power down bit cleared for at least one
         * lane so that chv_powergate_phy_ch() will power
         * on something when the channel is otherwise unused.
         * When the port is off and the override is removed
         * the lanes power down anyway, so otherwise it doesn't
         * really matter what the state of power down bits is
         * after this.
         */
        chv_phy_powergate_lanes(encoder, false, 0x0);
}
3006
3007 /*
3008  * Native read with retry for link status and receiver capability reads for
3009  * cases where the sink may still be asleep.
3010  *
3011  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3012  * supposed to retry 3 times per the spec.
3013  */
3014 static ssize_t
3015 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3016                         void *buffer, size_t size)
3017 {
3018         ssize_t ret;
3019         int i;
3020
3021         /*
3022          * Sometime we just get the same incorrect byte repeated
3023          * over the entire buffer. Doing just one throw away read
3024          * initially seems to "solve" it.
3025          */
3026         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3027
3028         for (i = 0; i < 3; i++) {
3029                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3030                 if (ret == size)
3031                         return ret;
3032                 msleep(1);
3033         }
3034
3035         return ret;
3036 }
3037
3038 /*
3039  * Fetch AUX CH registers 0x202 - 0x207 which contain
3040  * link status information
3041  */
3042 static bool
3043 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3044 {
3045         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3046                                        DP_LANE0_1_STATUS,
3047                                        link_status,
3048                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3049 }
3050
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level (DP_TRAIN_VOLTAGE_SWING_LEVEL_*)
 * the source supports, per platform/port.  The order of the platform
 * checks matters: more specific platforms are tested first.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (IS_BROXTON(dev))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (INTEL_INFO(dev)->gen >= 9) {
                /* gen9+: level 3 only on eDP (port A) with low-vswing VBT opt-in */
                if (dev_priv->edp_low_vswing && port == PORT_A)
                        return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        } else if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else if (IS_GEN7(dev) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        else if (HAS_PCH_CPT(dev) && port != PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
        else
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3074
/*
 * Return the maximum pre-emphasis level (DP_TRAIN_PRE_EMPH_LEVEL_*)
 * the source supports for the given voltage swing, per platform/port.
 * Higher swing levels generally allow less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                /* gen7 CPU eDP tops out at level 2 pre-emphasis */
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
3142
/*
 * Program the VLV DPIO PHY voltage swing / pre-emphasis for the current
 * train set (lane 0's values are used for all lanes).
 *
 * The register values are looked up from the requested pre-emphasis and
 * voltage swing levels and written through the sideband interface under
 * sb_lock.  Returns 0 because on VLV no bits need to be merged into the
 * DP port register by the caller (mask stays 0), or 0 as well on an
 * unsupported swing/pre-emphasis combination.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Look up the DPIO register values for the requested
	 * (pre-emphasis, voltage swing) combination.  Unsupported
	 * combinations bail out with 0 before touching the hardware.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/*
	 * Write the selected values through the sideband.  The TX lanes
	 * are disabled (TX_DW5 = 0) before reprogramming and re-enabled
	 * (bit 31) afterwards; keep this write order intact.
	 */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3242
3243 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3244 {
3245         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3246                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3247 }
3248
/*
 * Program the CHV DPIO PHY de-emphasis and swing margin for the current
 * train set (lane 0's values are used for all lanes).
 *
 * The sequence under sb_lock is: clear the swing-calc bits and set the
 * 9.5dB de-emphasis selects, zero the margins, program the per-lane
 * deemph/margin values, enable/disable the unique transition scale,
 * then kick off the swing calculation and set LRC bypass.  The order of
 * these writes matters; do not reorder them.
 *
 * Returns 0: no bits need to be merged into the DP port register by the
 * caller, or 0 as well for an unsupported swing/pre-emphasis combination.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up deemph/margin for the requested levels. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The second PCS group only exists when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Zero the TX margins before programming the swing margin below. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3421
3422 static void
3423 intel_get_adjust_train(struct intel_dp *intel_dp,
3424                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3425 {
3426         uint8_t v = 0;
3427         uint8_t p = 0;
3428         int lane;
3429         uint8_t voltage_max;
3430         uint8_t preemph_max;
3431
3432         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3433                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3434                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3435
3436                 if (this_v > v)
3437                         v = this_v;
3438                 if (this_p > p)
3439                         p = this_p;
3440         }
3441
3442         voltage_max = intel_dp_voltage_max(intel_dp);
3443         if (v >= voltage_max)
3444                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3445
3446         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3447         if (p >= preemph_max)
3448                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3449
3450         for (lane = 0; lane < 4; lane++)
3451                 intel_dp->train_set[lane] = v | p;
3452 }
3453
3454 static uint32_t
3455 gen4_signal_levels(uint8_t train_set)
3456 {
3457         uint32_t        signal_levels = 0;
3458
3459         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3460         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3461         default:
3462                 signal_levels |= DP_VOLTAGE_0_4;
3463                 break;
3464         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3465                 signal_levels |= DP_VOLTAGE_0_6;
3466                 break;
3467         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3468                 signal_levels |= DP_VOLTAGE_0_8;
3469                 break;
3470         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3471                 signal_levels |= DP_VOLTAGE_1_2;
3472                 break;
3473         }
3474         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3475         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3476         default:
3477                 signal_levels |= DP_PRE_EMPHASIS_0;
3478                 break;
3479         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3480                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3481                 break;
3482         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3483                 signal_levels |= DP_PRE_EMPHASIS_6;
3484                 break;
3485         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3486                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3487                 break;
3488         }
3489         return signal_levels;
3490 }
3491
3492 /* Gen6's DP voltage swing and pre-emphasis control */
3493 static uint32_t
3494 gen6_edp_signal_levels(uint8_t train_set)
3495 {
3496         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3497                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3498         switch (signal_levels) {
3499         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3501                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3502         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3503                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3504         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3505         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3506                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3507         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3508         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3509                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3510         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3511         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3513         default:
3514                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3515                               "0x%x\n", signal_levels);
3516                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3517         }
3518 }
3519
3520 /* Gen7's DP voltage swing and pre-emphasis control */
3521 static uint32_t
3522 gen7_edp_signal_levels(uint8_t train_set)
3523 {
3524         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3525                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3526         switch (signal_levels) {
3527         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3528                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3529         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3530                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3531         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3532                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3533
3534         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3535                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3536         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3537                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3538
3539         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3540                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3541         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3542                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3543
3544         default:
3545                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3546                               "0x%x\n", signal_levels);
3547                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3548         }
3549 }
3550
/*
 * Properly updates "DP" with the correct signal levels.
 *
 * Picks the platform-specific translation of intel_dp->train_set[0]
 * into port register bits and merges them into *DP under @mask.  On
 * DDI/VLV/CHV platforms the levels are programmed elsewhere (DDI buffer
 * translations / DPIO), so mask stays 0 and *DP is left unchanged.
 *
 * NOTE(review): the if/else ordering is load-bearing — CHV is tested
 * before VLV since IS_VALLEYVIEW() presumably also matches CHV here;
 * confirm before reordering.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT programs its levels entirely in ddi_signal_levels() */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* signal_levels only carry port register bits when a mask is set */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}
3594
3595 static bool
3596 intel_dp_set_link_train(struct intel_dp *intel_dp,
3597                         uint32_t *DP,
3598                         uint8_t dp_train_pat)
3599 {
3600         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3601         struct drm_i915_private *dev_priv =
3602                 to_i915(intel_dig_port->base.base.dev);
3603         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3604         int ret, len;
3605
3606         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3607
3608         I915_WRITE(intel_dp->output_reg, *DP);
3609         POSTING_READ(intel_dp->output_reg);
3610
3611         buf[0] = dp_train_pat;
3612         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3613             DP_TRAINING_PATTERN_DISABLE) {
3614                 /* don't write DP_TRAINING_LANEx_SET on disable */
3615                 len = 1;
3616         } else {
3617                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3618                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3619                 len = intel_dp->lane_count + 1;
3620         }
3621
3622         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3623                                 buf, len);
3624
3625         return ret == len;
3626 }
3627
/*
 * Begin link training from a known state: zero the train set (unless a
 * previously trained set is being reused, per train_set_valid), program
 * the resulting signal levels, then set @dp_train_pat on source and
 * sink.  Returns the result of intel_dp_set_link_train().
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3637
3638 static bool
3639 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3640                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3641 {
3642         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3643         struct drm_i915_private *dev_priv =
3644                 to_i915(intel_dig_port->base.base.dev);
3645         int ret;
3646
3647         intel_get_adjust_train(intel_dp, link_status);
3648         intel_dp_set_signal_levels(intel_dp, DP);
3649
3650         I915_WRITE(intel_dp->output_reg, *DP);
3651         POSTING_READ(intel_dp->output_reg);
3652
3653         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3654                                 intel_dp->train_set, intel_dp->lane_count);
3655
3656         return ret == intel_dp->lane_count;
3657 }
3658
/*
 * Switch the DDI transport to idle-pattern transmission and, except on
 * PORT_A, wait for the hardware to report the idle pattern done.
 * No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms is plenty; the status bit should flip almost immediately. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3689
/*
 * Enable corresponding port and start training pattern 1.
 *
 * Writes the link configuration (bandwidth/rate, lane count, enhanced
 * framing, 8b/10b) to the sink, then runs the clock-recovery phase:
 * training pattern 1 with scrambling disabled, iterating on the sink's
 * requested voltage swing / pre-emphasis adjustments until clock
 * recovery succeeds or the retry budget is exhausted.  The final port
 * register value is stored back in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising sink rates select the rate by index instead */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* No downspread, ANSI 8b/10b channel coding */
	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff can never match a real vswing, so the first pass never counts
	 * as a same-voltage retry */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart from scratch, up to
			 * 5 full restarts before giving up */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3804
/*
 * Channel-equalization phase of link training.
 *
 * Transmits training pattern 2 (or 3 for HBR2 / TPS3-capable sinks)
 * and iterates on the sink's adjustment requests until channel EQ
 * succeeds.  If clock recovery is lost, or EQ fails more than 5 times
 * in a row, the clock-recovery phase is redone from scratch (up to 5
 * such restarts via cr_tries).  On success train_set_valid is set so a
 * later retrain can reuse the trained values.  Finishes by switching
 * the port to idle-pattern transmission.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
	if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after too many clock-recovery restarts */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		/* Remember the trained values for future retrains */
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3889
3890 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3891 {
3892         intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3893                                 DP_TRAINING_PATTERN_DISABLE);
3894 }
3895
/*
 * Bring a (non-DDI) DP port down: switch the link-training field to the
 * idle pattern, then clear the port enable and audio bits, applying the
 * IBX transcoder-A workaround where needed.  DDI ports are handled
 * elsewhere (WARN and bail out).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* CPT (and gen7 port A) use a different link-train field layout. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and its audio output). */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3951
/*
 * Read and cache the sink's DPCD receiver capabilities, then probe the
 * optional capability blocks that depend on them: PSR/PSR2 (eDP only),
 * TPS3 support, eDP 1.4 intermediate link rates, and downstream port
 * info for branch devices.
 *
 * Returns false if the AUX transfer fails, no DPCD is present, or the
 * downstream-port info of a branch device cannot be fetched.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			/*
			 * NOTE(review): if this read fails frame_sync_cap is
			 * used uninitialized below — confirm read_wake cannot
			 * fail here, or zero-init frame_sync_cap.
			 */
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (drm_dp_tps3_supported(intel_dp->dpcd) &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/*
			 * DPCD reports rates in 200 kHz units (eDP 1.4);
			 * drm stores clocks in 10 kHz units, hence
			 * val * 200 / 10.
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4045
4046 static void
4047 intel_dp_probe_oui(struct intel_dp *intel_dp)
4048 {
4049         u8 buf[3];
4050
4051         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4052                 return;
4053
4054         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4055                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4056                               buf[0], buf[1], buf[2]);
4057
4058         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4059                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4060                               buf[0], buf[1], buf[2]);
4061 }
4062
4063 static bool
4064 intel_dp_probe_mst(struct intel_dp *intel_dp)
4065 {
4066         u8 buf[1];
4067
4068         if (!intel_dp->can_mst)
4069                 return false;
4070
4071         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4072                 return false;
4073
4074         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4075                 if (buf[0] & DP_MST_CAP) {
4076                         DRM_DEBUG_KMS("Sink is MST capable\n");
4077                         intel_dp->is_mst = true;
4078                 } else {
4079                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4080                         intel_dp->is_mst = false;
4081                 }
4082         }
4083
4084         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4085         return intel_dp->is_mst;
4086 }
4087
/*
 * Stop sink CRC calculation by clearing DP_TEST_SINK_START in the sink's
 * DP_TEST_SINK register, and re-enable IPS (disabled while CRCs ran).
 *
 * Returns 0 on success or -EIO if the DPCD read/modify/write fails; IPS
 * is re-enabled on every path.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Read-modify-write: clear only the start bit. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4113
/*
 * Start sink CRC calculation: stop any run already in progress, verify
 * the sink supports CRC (DP_TEST_CRC_SUPPORTED), record the current test
 * count, disable IPS, and set DP_TEST_SINK_START on the sink.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, or -EIO
 * on an AUX failure.  IPS is left disabled only on success.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Baseline count used to detect when a fresh CRC has been computed. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* IPS interferes with CRC measurement; keep it off while running. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
4149
/*
 * Read a frame CRC from the sink's DPCD test registers into @crc (6 bytes).
 *
 * Starts sink CRC generation, then waits up to 6 vblanks for the sink's
 * test count to advance and for the CRC to differ from the previously
 * returned one, so a stale value is not mistaken for a fresh measurement.
 * CRC generation is stopped again before returning.
 *
 * Returns 0 on success, -EIO on AUX failures, -ETIMEDOUT if the sink
 * never produced a CRC.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* Same count and identical CRC => no new measurement yet. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	/* Remember this result so the next call can detect staleness. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		/*
		 * NOTE(review): in the old_equal_new case ret stays 0, so a
		 * possibly stale CRC is returned as success (only a debug
		 * message is emitted) — confirm this is the intended
		 * behavior for unreliable sink counters.
		 */
		if (old_equal_new) {
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4209
4210 static bool
4211 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4212 {
4213         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4214                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4215                                        sink_irq_vector, 1) == 1;
4216 }
4217
4218 static bool
4219 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4220 {
4221         int ret;
4222
4223         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4224                                              DP_SINK_COUNT_ESI,
4225                                              sink_irq_vector, 14);
4226         if (ret != 14)
4227                 return false;
4228
4229         return true;
4230 }
4231
4232 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4233 {
4234         uint8_t test_result = DP_TEST_ACK;
4235         return test_result;
4236 }
4237
4238 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4239 {
4240         uint8_t test_result = DP_TEST_NAK;
4241         return test_result;
4242 }
4243
4244 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4245 {
4246         uint8_t test_result = DP_TEST_NAK;
4247         struct intel_connector *intel_connector = intel_dp->attached_connector;
4248         struct drm_connector *connector = &intel_connector->base;
4249
4250         if (intel_connector->detect_edid == NULL ||
4251             connector->edid_corrupt ||
4252             intel_dp->aux.i2c_defer_count > 6) {
4253                 /* Check EDID read for NACKs, DEFERs and corruption
4254                  * (DP CTS 1.2 Core r1.1)
4255                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4256                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4257                  *    4.2.2.6 : EDID corruption detected
4258                  * Use failsafe mode for all cases
4259                  */
4260                 if (intel_dp->aux.i2c_nack_count > 0 ||
4261                         intel_dp->aux.i2c_defer_count > 0)
4262                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4263                                       intel_dp->aux.i2c_nack_count,
4264                                       intel_dp->aux.i2c_defer_count);
4265                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4266         } else {
4267                 struct edid *block = intel_connector->detect_edid;
4268
4269                 /* We have to write the checksum
4270                  * of the last block read
4271                  */
4272                 block += intel_connector->detect_edid->extensions;
4273
4274                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4275                                         DP_TEST_EDID_CHECKSUM,
4276                                         &block->checksum,
4277                                         1))
4278                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4279
4280                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4281                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4282         }
4283
4284         /* Set test active flag here so userspace doesn't interrupt things */
4285         intel_dp->compliance_test_active = 1;
4286
4287         return test_result;
4288 }
4289
4290 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4291 {
4292         uint8_t test_result = DP_TEST_NAK;
4293         return test_result;
4294 }
4295
/*
 * Service a sink-initiated DP automated test request: read the request
 * byte from DP_TEST_REQUEST, dispatch to the matching autotest handler,
 * and write the handler's ACK/NAK back to DP_TEST_RESPONSE.  Compliance
 * state and AUX error counters are cleared first so each test starts
 * from a clean slate.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	/* Reset compliance bookkeeping before handling the new request. */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Fresh NAK/DEFER counts — the EDID test inspects these. */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always tell the sink how we handled (or failed to read) the test. */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4348
/*
 * Service an MST short-pulse interrupt: read the sink's ESI block,
 * retrain the link if channel EQ dropped, hand the ESI to the MST
 * topology manager, and ack the IRQ bits back to the sink — looping as
 * long as new ESI data keeps arriving.  If the ESI read fails, MST mode
 * is torn down and a hotplug event is sent.
 *
 * Returns the topology manager's result while MST is active, otherwise
 * -EINVAL.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced IRQ bits; retry the 3-byte
				 * write up to 3 times on short transfers. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More ESI data may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4406
4407 /*
4408  * According to DP spec
4409  * 5.1.2:
4410  *  1. Read DPCD
4411  *  2. Configure link according to Receiver Capabilities
4412  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4413  *  4. Check link status on receipt of hot-plug interrupt
4414  */
4415 static void
4416 intel_dp_check_link_status(struct intel_dp *intel_dp)
4417 {
4418         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4419         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4420         u8 sink_irq_vector;
4421         u8 link_status[DP_LINK_STATUS_SIZE];
4422
4423         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4424
4425         if (!intel_encoder->base.crtc)
4426                 return;
4427
4428         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4429                 return;
4430
4431         /* Try to read receiver status if the link appears to be up */
4432         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4433                 return;
4434         }
4435
4436         /* Now read the DPCD to see if it's actually running */
4437         if (!intel_dp_get_dpcd(intel_dp)) {
4438                 return;
4439         }
4440
4441         /* Try to read the source of the interrupt */
4442         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4443             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4444                 /* Clear interrupt source */
4445                 drm_dp_dpcd_writeb(&intel_dp->aux,
4446                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4447                                    sink_irq_vector);
4448
4449                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4450                         DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4451                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4452                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4453         }
4454
4455         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4456                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4457                               intel_encoder->base.name);
4458                 intel_dp_start_link_train(intel_dp);
4459                 intel_dp_complete_link_train(intel_dp);
4460                 intel_dp_stop_link_train(intel_dp);
4461         }
4462 }
4463
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status via DPCD for a sink that may sit behind a
 * branch device: a fresh DPCD read, then (in order) SINK_COUNT for
 * HPD-aware branches, a gentle DDC probe, and finally per-port-type
 * heuristics for sinks that can't reliably report presence.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port downstream type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4513
4514 static enum drm_connector_status
4515 edp_detect(struct intel_dp *intel_dp)
4516 {
4517         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4518         enum drm_connector_status status;
4519
4520         status = intel_panel_detect(dev);
4521         if (status == connector_status_unknown)
4522                 status = connector_status_connected;
4523
4524         return status;
4525 }
4526
4527 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4528                                        struct intel_digital_port *port)
4529 {
4530         u32 bit;
4531
4532         switch (port->port) {
4533         case PORT_A:
4534                 return true;
4535         case PORT_B:
4536                 bit = SDE_PORTB_HOTPLUG;
4537                 break;
4538         case PORT_C:
4539                 bit = SDE_PORTC_HOTPLUG;
4540                 break;
4541         case PORT_D:
4542                 bit = SDE_PORTD_HOTPLUG;
4543                 break;
4544         default:
4545                 MISSING_CASE(port->port);
4546                 return false;
4547         }
4548
4549         return I915_READ(SDEISR) & bit;
4550 }
4551
4552 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4553                                        struct intel_digital_port *port)
4554 {
4555         u32 bit;
4556
4557         switch (port->port) {
4558         case PORT_A:
4559                 return true;
4560         case PORT_B:
4561                 bit = SDE_PORTB_HOTPLUG_CPT;
4562                 break;
4563         case PORT_C:
4564                 bit = SDE_PORTC_HOTPLUG_CPT;
4565                 break;
4566         case PORT_D:
4567                 bit = SDE_PORTD_HOTPLUG_CPT;
4568                 break;
4569         default:
4570                 MISSING_CASE(port->port);
4571                 return false;
4572         }
4573
4574         return I915_READ(SDEISR) & bit;
4575 }
4576
4577 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4578                                        struct intel_digital_port *port)
4579 {
4580         u32 bit;
4581
4582         switch (port->port) {
4583         case PORT_B:
4584                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4585                 break;
4586         case PORT_C:
4587                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4588                 break;
4589         case PORT_D:
4590                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4591                 break;
4592         default:
4593                 MISSING_CASE(port->port);
4594                 return false;
4595         }
4596
4597         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4598 }
4599
4600 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4601                                        struct intel_digital_port *port)
4602 {
4603         u32 bit;
4604
4605         switch (port->port) {
4606         case PORT_B:
4607                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4608                 break;
4609         case PORT_C:
4610                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4611                 break;
4612         case PORT_D:
4613                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4614                 break;
4615         default:
4616                 MISSING_CASE(port->port);
4617                 return false;
4618         }
4619
4620         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4621 }
4622
4623 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4624                                        struct intel_digital_port *port)
4625 {
4626         u32 bit;
4627
4628         switch (port->port) {
4629         case PORT_A:
4630                 bit = BXT_DE_PORT_HP_DDIA;
4631                 break;
4632         case PORT_B:
4633                 bit = BXT_DE_PORT_HP_DDIB;
4634                 break;
4635         case PORT_C:
4636                 bit = BXT_DE_PORT_HP_DDIC;
4637                 break;
4638         default:
4639                 MISSING_CASE(port->port);
4640                 return false;
4641         }
4642
4643         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4644 }
4645
4646 /*
4647  * intel_digital_port_connected - is the specified port connected?
4648  * @dev_priv: i915 private structure
4649  * @port: the port to test
4650  *
4651  * Return %true if @port is connected, %false otherwise.
4652  */
4653 static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4654                                          struct intel_digital_port *port)
4655 {
4656         if (HAS_PCH_IBX(dev_priv))
4657                 return ibx_digital_port_connected(dev_priv, port);
4658         if (HAS_PCH_SPLIT(dev_priv))
4659                 return cpt_digital_port_connected(dev_priv, port);
4660         else if (IS_BROXTON(dev_priv))
4661                 return bxt_digital_port_connected(dev_priv, port);
4662         else if (IS_VALLEYVIEW(dev_priv))
4663                 return vlv_digital_port_connected(dev_priv, port);
4664         else
4665                 return g4x_digital_port_connected(dev_priv, port);
4666 }
4667
4668 static enum drm_connector_status
4669 ironlake_dp_detect(struct intel_dp *intel_dp)
4670 {
4671         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4672         struct drm_i915_private *dev_priv = dev->dev_private;
4673         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4674
4675         if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4676                 return connector_status_disconnected;
4677
4678         return intel_dp_detect_dpcd(intel_dp);
4679 }
4680
4681 static enum drm_connector_status
4682 g4x_dp_detect(struct intel_dp *intel_dp)
4683 {
4684         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4685         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4686
4687         /* Can't disconnect eDP, but you can close the lid... */
4688         if (is_edp(intel_dp)) {
4689                 enum drm_connector_status status;
4690
4691                 status = intel_panel_detect(dev);
4692                 if (status == connector_status_unknown)
4693                         status = connector_status_connected;
4694                 return status;
4695         }
4696
4697         if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4698                 return connector_status_disconnected;
4699
4700         return intel_dp_detect_dpcd(intel_dp);
4701 }
4702
4703 static struct edid *
4704 intel_dp_get_edid(struct intel_dp *intel_dp)
4705 {
4706         struct intel_connector *intel_connector = intel_dp->attached_connector;
4707
4708         /* use cached edid if we have one */
4709         if (intel_connector->edid) {
4710                 /* invalid edid */
4711                 if (IS_ERR(intel_connector->edid))
4712                         return NULL;
4713
4714                 return drm_edid_duplicate(intel_connector->edid);
4715         } else
4716                 return drm_get_edid(&intel_connector->base,
4717                                     &intel_dp->aux.ddc);
4718 }
4719
4720 static void
4721 intel_dp_set_edid(struct intel_dp *intel_dp)
4722 {
4723         struct intel_connector *intel_connector = intel_dp->attached_connector;
4724         struct edid *edid;
4725
4726         edid = intel_dp_get_edid(intel_dp);
4727         intel_connector->detect_edid = edid;
4728
4729         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4730                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4731         else
4732                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4733 }
4734
4735 static void
4736 intel_dp_unset_edid(struct intel_dp *intel_dp)
4737 {
4738         struct intel_connector *intel_connector = intel_dp->attached_connector;
4739
4740         kfree(intel_connector->detect_edid);
4741         intel_connector->detect_edid = NULL;
4742
4743         intel_dp->has_audio = false;
4744 }
4745
4746 static enum intel_display_power_domain
4747 intel_dp_power_get(struct intel_dp *dp)
4748 {
4749         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4750         enum intel_display_power_domain power_domain;
4751
4752         power_domain = intel_display_port_power_domain(encoder);
4753         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4754
4755         return power_domain;
4756 }
4757
4758 static void
4759 intel_dp_power_put(struct intel_dp *dp,
4760                    enum intel_display_power_domain power_domain)
4761 {
4762         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4763         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4764 }
4765
/*
 * DRM connector ->detect() hook for DP/eDP.
 *
 * Probes the port's live status and the sink's DPCD, caches the EDID for a
 * later get_modes() call, hands MST-capable sinks over to the MST code, and
 * services any pending sink IRQ (e.g. automated test requests). All hardware
 * access is bracketed by a port power domain reference.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Throw away any stale cached EDID before re-probing. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4837
/*
 * DRM connector ->force() hook: refresh the cached EDID for a connector
 * whose status is being forced, without running a full detect cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Nothing to cache unless the connector is considered connected. */
	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4861
4862 static int intel_dp_get_modes(struct drm_connector *connector)
4863 {
4864         struct intel_connector *intel_connector = to_intel_connector(connector);
4865         struct edid *edid;
4866
4867         edid = intel_connector->detect_edid;
4868         if (edid) {
4869                 int ret = intel_connector_update_modes(connector, edid);
4870                 if (ret)
4871                         return ret;
4872         }
4873
4874         /* if eDP has no EDID, fall back to fixed mode */
4875         if (is_edp(intel_attached_dp(connector)) &&
4876             intel_connector->panel.fixed_mode) {
4877                 struct drm_display_mode *mode;
4878
4879                 mode = drm_mode_duplicate(connector->dev,
4880                                           intel_connector->panel.fixed_mode);
4881                 if (mode) {
4882                         drm_mode_probed_add(connector, mode);
4883                         return 1;
4884                 }
4885         }
4886
4887         return 0;
4888 }
4889
4890 static bool
4891 intel_dp_detect_audio(struct drm_connector *connector)
4892 {
4893         bool has_audio = false;
4894         struct edid *edid;
4895
4896         edid = to_intel_connector(connector)->detect_edid;
4897         if (edid)
4898                 has_audio = drm_detect_monitor_audio(edid);
4899
4900         return has_audio;
4901 }
4902
/*
 * DRM connector ->set_property() hook. Handles the force-audio and
 * Broadcast RGB properties and, for eDP, the panel scaling mode; an
 * accepted change triggers a modeset restore on the active CRTC.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means: believe whatever the cached EDID reports. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4990
/*
 * DRM connector ->destroy() hook: free the cached EDIDs and, for eDP,
 * tear down the panel state before releasing the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR marking a bad EDID; don't kfree() that. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
5009
/*
 * DRM encoder ->destroy() hook: unregister the AUX channel, clean up any
 * MST state and, for eDP, make sure VDD is off and the reboot notifier is
 * gone before the memory is freed.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5035
/* Suspend hook: for eDP, force any pending delayed VDD-off to complete. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5052
/*
 * Sync our VDD state tracking with hardware in case the BIOS left panel
 * VDD enabled at boot/resume. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5077
/*
 * DRM encoder ->reset() hook: re-sync eDP power sequencer state with
 * whatever the BIOS left behind (only eDP encoders need this).
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5100
/* Connector vfuncs shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5112
/* Probe helper vfuncs for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5118
/* Encoder vfuncs for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5123
5124 enum irqreturn
5125 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5126 {
5127         struct intel_dp *intel_dp = &intel_dig_port->dp;
5128         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5129         struct drm_device *dev = intel_dig_port->base.base.dev;
5130         struct drm_i915_private *dev_priv = dev->dev_private;
5131         enum intel_display_power_domain power_domain;
5132         enum irqreturn ret = IRQ_NONE;
5133
5134         if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5135                 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5136
5137         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5138                 /*
5139                  * vdd off can generate a long pulse on eDP which
5140                  * would require vdd on to handle it, and thus we
5141                  * would end up in an endless cycle of
5142                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5143                  */
5144                 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5145                               port_name(intel_dig_port->port));
5146                 return IRQ_HANDLED;
5147         }
5148
5149         DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5150                       port_name(intel_dig_port->port),
5151                       long_hpd ? "long" : "short");
5152
5153         power_domain = intel_display_port_power_domain(intel_encoder);
5154         intel_display_power_get(dev_priv, power_domain);
5155
5156         if (long_hpd) {
5157                 /* indicate that we need to restart link training */
5158                 intel_dp->train_set_valid = false;
5159
5160                 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5161                         goto mst_fail;
5162
5163                 if (!intel_dp_get_dpcd(intel_dp)) {
5164                         goto mst_fail;
5165                 }
5166
5167                 intel_dp_probe_oui(intel_dp);
5168
5169                 if (!intel_dp_probe_mst(intel_dp))
5170                         goto mst_fail;
5171
5172         } else {
5173                 if (intel_dp->is_mst) {
5174                         if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5175                                 goto mst_fail;
5176                 }
5177
5178                 if (!intel_dp->is_mst) {
5179                         /*
5180                          * we'll check the link status via the normal hot plug path later -
5181                          * but for short hpds we should check it now
5182                          */
5183                         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5184                         intel_dp_check_link_status(intel_dp);
5185                         drm_modeset_unlock(&dev->mode_config.connection_mutex);
5186                 }
5187         }
5188
5189         ret = IRQ_HANDLED;
5190
5191         goto put_power;
5192 mst_fail:
5193         /* if we were in MST mode, and device is not there get out of MST mode */
5194         if (intel_dp->is_mst) {
5195                 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5196                 intel_dp->is_mst = false;
5197                 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5198         }
5199 put_power:
5200         intel_display_power_put(dev_priv, power_domain);
5201
5202         return ret;
5203 }
5204
5205 /* Return which DP Port should be selected for Transcoder DP control */
5206 int
5207 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5208 {
5209         struct drm_device *dev = crtc->dev;
5210         struct intel_encoder *intel_encoder;
5211         struct intel_dp *intel_dp;
5212
5213         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5214                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5215
5216                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5217                     intel_encoder->type == INTEL_OUTPUT_EDP)
5218                         return intel_dp->output_reg;
5219         }
5220
5221         return -1;
5222 }
5223
/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/*
	 * VBT DVO port ids for the ports we may be asked about.
	 * NOTE(review): indexing assumes only ports A-D are ever passed
	 * in (A returns early below) — confirm against callers.
	 */
	static const short port_mapping[] = {
		[PORT_B] = PORT_IDPB,
		[PORT_C] = PORT_IDPC,
		[PORT_D] = PORT_IDPD,
	};

	/* Port A is treated as eDP unconditionally. */
	if (port == PORT_A)
		return true;

	/* No VBT child devices: nothing claims this port is eDP. */
	if (!dev_priv->vbt.child_dev_num)
		return false;

	/* Scan the VBT child devices for an eDP entry on this port. */
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5252
/*
 * Attach the DP-specific DRM properties (force audio, Broadcast RGB and,
 * for eDP, the panel scaling mode) to a newly created connector.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	/* eDP panels default to aspect-preserving scaling. */
	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5271
5272 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5273 {
5274         intel_dp->last_power_cycle = jiffies;
5275         intel_dp->last_power_on = jiffies;
5276         intel_dp->last_backlight_off = jiffies;
5277 }
5278
/*
 * Compute the eDP panel power sequencing delays: read the current values
 * from the PPS registers and from the VBT, take the max of both per field
 * (falling back to the eDP spec limits when both are unset) and store the
 * results in intel_dp. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT has no divisor register; its cycle delay lives in pp_ctl. */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT stores t11_t12 zero-based in 100ms units; convert to
		 * the common 100us units used by the other fields. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the 100us register units into ms for our delay fields. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5401
/*
 * Write the cached panel power sequencer delays (intel_dp->pps_delays) into
 * the hardware PPS registers for this port.
 *
 * Caller must hold dev_priv->pps_mutex (asserted below), and the delays must
 * already have been resolved from VBT/spec/current-HW values.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference clock used for the divisor differs between PCH and non-PCH. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Select the PPS register bank for this platform generation. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV/CHV: PPS registers are per-pipe. */
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* On BXT the power cycle delay field lives in the control
		 * register; preserve the other bits with a read-modify-write. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	/* Read the values back for the debug dump so we log what the HW
	 * actually latched, not what we intended to write. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5489
5490 /**
5491  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5492  * @dev: DRM device
5493  * @refresh_rate: RR to be programmed
5494  *
5495  * This function gets called when refresh rate (RR) has to be changed from
5496  * one frequency to another. Switches can be between high and low RR
5497  * supported by the panel or to any other RR based on media playback (in
5498  * this case, RR value needs to be passed from user space).
5499  *
5500  * The caller of this function needs to take a lock on dev_priv->drrs.
5501  */
5502 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5503 {
5504         struct drm_i915_private *dev_priv = dev->dev_private;
5505         struct intel_encoder *encoder;
5506         struct intel_digital_port *dig_port = NULL;
5507         struct intel_dp *intel_dp = dev_priv->drrs.dp;
5508         struct intel_crtc_state *config = NULL;
5509         struct intel_crtc *intel_crtc = NULL;
5510         u32 reg, val;
5511         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5512
5513         if (refresh_rate <= 0) {
5514                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5515                 return;
5516         }
5517
5518         if (intel_dp == NULL) {
5519                 DRM_DEBUG_KMS("DRRS not supported.\n");
5520                 return;
5521         }
5522
5523         /*
5524          * FIXME: This needs proper synchronization with psr state for some
5525          * platforms that cannot have PSR and DRRS enabled at the same time.
5526          */
5527
5528         dig_port = dp_to_dig_port(intel_dp);
5529         encoder = &dig_port->base;
5530         intel_crtc = to_intel_crtc(encoder->base.crtc);
5531
5532         if (!intel_crtc) {
5533                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5534                 return;
5535         }
5536
5537         config = intel_crtc->config;
5538
5539         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5540                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5541                 return;
5542         }
5543
5544         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5545                         refresh_rate)
5546                 index = DRRS_LOW_RR;
5547
5548         if (index == dev_priv->drrs.refresh_rate_type) {
5549                 DRM_DEBUG_KMS(
5550                         "DRRS requested for previously set RR...ignoring\n");
5551                 return;
5552         }
5553
5554         if (!intel_crtc->active) {
5555                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5556                 return;
5557         }
5558
5559         if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5560                 switch (index) {
5561                 case DRRS_HIGH_RR:
5562                         intel_dp_set_m_n(intel_crtc, M1_N1);
5563                         break;
5564                 case DRRS_LOW_RR:
5565                         intel_dp_set_m_n(intel_crtc, M2_N2);
5566                         break;
5567                 case DRRS_MAX_RR:
5568                 default:
5569                         DRM_ERROR("Unsupported refreshrate type\n");
5570                 }
5571         } else if (INTEL_INFO(dev)->gen > 6) {
5572                 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5573                 val = I915_READ(reg);
5574
5575                 if (index > DRRS_HIGH_RR) {
5576                         if (IS_VALLEYVIEW(dev))
5577                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5578                         else
5579                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5580                 } else {
5581                         if (IS_VALLEYVIEW(dev))
5582                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5583                         else
5584                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5585                 }
5586                 I915_WRITE(reg, val);
5587         }
5588
5589         dev_priv->drrs.refresh_rate_type = index;
5590
5591         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5592 }
5593
5594 /**
5595  * intel_edp_drrs_enable - init drrs struct if supported
5596  * @intel_dp: DP struct
5597  *
5598  * Initializes frontbuffer_bits and drrs.dp
5599  */
5600 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5601 {
5602         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5603         struct drm_i915_private *dev_priv = dev->dev_private;
5604         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5605         struct drm_crtc *crtc = dig_port->base.base.crtc;
5606         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5607
5608         if (!intel_crtc->config->has_drrs) {
5609                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5610                 return;
5611         }
5612
5613         mutex_lock(&dev_priv->drrs.mutex);
5614         if (WARN_ON(dev_priv->drrs.dp)) {
5615                 DRM_ERROR("DRRS already enabled\n");
5616                 goto unlock;
5617         }
5618
5619         dev_priv->drrs.busy_frontbuffer_bits = 0;
5620
5621         dev_priv->drrs.dp = intel_dp;
5622
5623 unlock:
5624         mutex_unlock(&dev_priv->drrs.mutex);
5625 }
5626
5627 /**
5628  * intel_edp_drrs_disable - Disable DRRS
5629  * @intel_dp: DP struct
5630  *
5631  */
5632 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5633 {
5634         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5635         struct drm_i915_private *dev_priv = dev->dev_private;
5636         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5637         struct drm_crtc *crtc = dig_port->base.base.crtc;
5638         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5639
5640         if (!intel_crtc->config->has_drrs)
5641                 return;
5642
5643         mutex_lock(&dev_priv->drrs.mutex);
5644         if (!dev_priv->drrs.dp) {
5645                 mutex_unlock(&dev_priv->drrs.mutex);
5646                 return;
5647         }
5648
5649         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5650                 intel_dp_set_drrs_state(dev_priv->dev,
5651                         intel_dp->attached_connector->panel.
5652                         fixed_mode->vrefresh);
5653
5654         dev_priv->drrs.dp = NULL;
5655         mutex_unlock(&dev_priv->drrs.mutex);
5656
5657         cancel_delayed_work_sync(&dev_priv->drrs.work);
5658 }
5659
5660 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5661 {
5662         struct drm_i915_private *dev_priv =
5663                 container_of(work, typeof(*dev_priv), drrs.work.work);
5664         struct intel_dp *intel_dp;
5665
5666         mutex_lock(&dev_priv->drrs.mutex);
5667
5668         intel_dp = dev_priv->drrs.dp;
5669
5670         if (!intel_dp)
5671                 goto unlock;
5672
5673         /*
5674          * The delayed work can race with an invalidate hence we need to
5675          * recheck.
5676          */
5677
5678         if (dev_priv->drrs.busy_frontbuffer_bits)
5679                 goto unlock;
5680
5681         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5682                 intel_dp_set_drrs_state(dev_priv->dev,
5683                         intel_dp->attached_connector->panel.
5684                         downclock_mode->vrefresh);
5685
5686 unlock:
5687         mutex_unlock(&dev_priv->drrs.mutex);
5688 }
5689
5690 /**
5691  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5692  * @dev: DRM device
5693  * @frontbuffer_bits: frontbuffer plane tracking bits
5694  *
5695  * This function gets called everytime rendering on the given planes start.
5696  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5697  *
5698  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5699  */
5700 void intel_edp_drrs_invalidate(struct drm_device *dev,
5701                 unsigned frontbuffer_bits)
5702 {
5703         struct drm_i915_private *dev_priv = dev->dev_private;
5704         struct drm_crtc *crtc;
5705         enum pipe pipe;
5706
5707         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5708                 return;
5709
5710         cancel_delayed_work(&dev_priv->drrs.work);
5711
5712         mutex_lock(&dev_priv->drrs.mutex);
5713         if (!dev_priv->drrs.dp) {
5714                 mutex_unlock(&dev_priv->drrs.mutex);
5715                 return;
5716         }
5717
5718         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5719         pipe = to_intel_crtc(crtc)->pipe;
5720
5721         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5722         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5723
5724         /* invalidate means busy screen hence upclock */
5725         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5726                 intel_dp_set_drrs_state(dev_priv->dev,
5727                                 dev_priv->drrs.dp->attached_connector->panel.
5728                                 fixed_mode->vrefresh);
5729
5730         mutex_unlock(&dev_priv->drrs.mutex);
5731 }
5732
5733 /**
5734  * intel_edp_drrs_flush - Restart Idleness DRRS
5735  * @dev: DRM device
5736  * @frontbuffer_bits: frontbuffer plane tracking bits
5737  *
5738  * This function gets called every time rendering on the given planes has
5739  * completed or flip on a crtc is completed. So DRRS should be upclocked
5740  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5741  * if no other planes are dirty.
5742  *
5743  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5744  */
5745 void intel_edp_drrs_flush(struct drm_device *dev,
5746                 unsigned frontbuffer_bits)
5747 {
5748         struct drm_i915_private *dev_priv = dev->dev_private;
5749         struct drm_crtc *crtc;
5750         enum pipe pipe;
5751
5752         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5753                 return;
5754
5755         cancel_delayed_work(&dev_priv->drrs.work);
5756
5757         mutex_lock(&dev_priv->drrs.mutex);
5758         if (!dev_priv->drrs.dp) {
5759                 mutex_unlock(&dev_priv->drrs.mutex);
5760                 return;
5761         }
5762
5763         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5764         pipe = to_intel_crtc(crtc)->pipe;
5765
5766         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5767         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5768
5769         /* flush means busy screen hence upclock */
5770         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5771                 intel_dp_set_drrs_state(dev_priv->dev,
5772                                 dev_priv->drrs.dp->attached_connector->panel.
5773                                 fixed_mode->vrefresh);
5774
5775         /*
5776          * flush also means no more activity hence schedule downclock, if all
5777          * other fbs are quiescent too
5778          */
5779         if (!dev_priv->drrs.busy_frontbuffer_bits)
5780                 schedule_delayed_work(&dev_priv->drrs.work,
5781                                 msecs_to_jiffies(1000));
5782         mutex_unlock(&dev_priv->drrs.mutex);
5783 }
5784
5785 /**
5786  * DOC: Display Refresh Rate Switching (DRRS)
5787  *
5788  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5790  * dynamically, based on the usage scenario. This feature is applicable
5791  * for internal panels.
5792  *
5793  * Indication that the panel supports DRRS is given by the panel EDID, which
5794  * would list multiple refresh rates for one resolution.
5795  *
5796  * DRRS is of 2 types - static and seamless.
5797  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5798  * (may appear as a blink on screen) and is used in dock-undock scenario.
5799  * Seamless DRRS involves changing RR without any visual effect to the user
5800  * and can be used during normal system usage. This is done by programming
5801  * certain registers.
5802  *
5803  * Support for static/seamless DRRS may be indicated in the VBT based on
5804  * inputs from the panel spec.
5805  *
5806  * DRRS saves power by switching to low RR based on usage scenarios.
5807  *
5808  * eDP DRRS:-
5809  *        The implementation is based on frontbuffer tracking implementation.
5810  * When there is a disturbance on the screen triggered by user activity or a
5811  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5812  * When there is no movement on screen, after a timeout of 1 second, a switch
5813  * to low RR is made.
5814  *        For integration with frontbuffer tracking code,
5815  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5816  *
5817  * DRRS can be further extended to support other internal panels and also
5818  * the scenario of video playback wherein RR is set based on the rate
5819  * requested by userspace.
5820  */
5821
5822 /**
5823  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5824  * @intel_connector: eDP connector
5825  * @fixed_mode: preferred mode of panel
5826  *
5827  * This function is  called only once at driver load to initialize basic
5828  * DRRS stuff.
5829  *
5830  * Returns:
5831  * Downclock mode if panel supports it, else return NULL.
5832  * DRRS support is determined by the presence of downclock mode (apart
5833  * from VBT setting).
5834  */
5835 static struct drm_display_mode *
5836 intel_dp_drrs_init(struct intel_connector *intel_connector,
5837                 struct drm_display_mode *fixed_mode)
5838 {
5839         struct drm_connector *connector = &intel_connector->base;
5840         struct drm_device *dev = connector->dev;
5841         struct drm_i915_private *dev_priv = dev->dev_private;
5842         struct drm_display_mode *downclock_mode = NULL;
5843
5844         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5845         mutex_init(&dev_priv->drrs.mutex);
5846
5847         if (INTEL_INFO(dev)->gen <= 6) {
5848                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5849                 return NULL;
5850         }
5851
5852         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5853                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5854                 return NULL;
5855         }
5856
5857         downclock_mode = intel_find_panel_downclock
5858                                         (dev, fixed_mode, connector);
5859
5860         if (!downclock_mode) {
5861                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5862                 return NULL;
5863         }
5864
5865         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5866
5867         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5868         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5869         return downclock_mode;
5870 }
5871
/*
 * eDP-specific connector setup: cache DPCD and EDID, pick the panel's fixed
 * (and optional DRRS downclock) mode, program the power sequencer registers
 * and set up the backlight.
 *
 * Returns true on success (trivially so for non-eDP ports); returns false
 * when the DPCD read fails, which is taken to mean the eDP panel is a
 * "ghost" and the connector should be torn down by the caller.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* Nothing to do for external DP ports. */
	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but unusable; record the error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cached for later probes; may be an ERR_PTR, never NULL. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS support is probed off the fixed mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5978
/*
 * Finish initializing a DP/eDP connector on the given digital port: set up
 * the intel_dp vfuncs, DRM connector object, hotplug pin, panel power
 * sequencer (eDP), AUX channel and MST.
 *
 * Returns false (after unwinding the connector) if the port can't host the
 * output or the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed work to drop VDD after the AUX-triggered grace period. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A/A1 stepping: port B HPD is wired to the port A pin. */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP panel: unwind everything set up above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
6124
6125 void
6126 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6127 {
6128         struct drm_i915_private *dev_priv = dev->dev_private;
6129         struct intel_digital_port *intel_dig_port;
6130         struct intel_encoder *intel_encoder;
6131         struct drm_encoder *encoder;
6132         struct intel_connector *intel_connector;
6133
6134         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6135         if (!intel_dig_port)
6136                 return;
6137
6138         intel_connector = intel_connector_alloc();
6139         if (!intel_connector) {
6140                 kfree(intel_dig_port);
6141                 return;
6142         }
6143
6144         intel_encoder = &intel_dig_port->base;
6145         encoder = &intel_encoder->base;
6146
6147         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6148                          DRM_MODE_ENCODER_TMDS);
6149
6150         intel_encoder->compute_config = intel_dp_compute_config;
6151         intel_encoder->disable = intel_disable_dp;
6152         intel_encoder->get_hw_state = intel_dp_get_hw_state;
6153         intel_encoder->get_config = intel_dp_get_config;
6154         intel_encoder->suspend = intel_dp_encoder_suspend;
6155         if (IS_CHERRYVIEW(dev)) {
6156                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6157                 intel_encoder->pre_enable = chv_pre_enable_dp;
6158                 intel_encoder->enable = vlv_enable_dp;
6159                 intel_encoder->post_disable = chv_post_disable_dp;
6160                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6161         } else if (IS_VALLEYVIEW(dev)) {
6162                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6163                 intel_encoder->pre_enable = vlv_pre_enable_dp;
6164                 intel_encoder->enable = vlv_enable_dp;
6165                 intel_encoder->post_disable = vlv_post_disable_dp;
6166         } else {
6167                 intel_encoder->pre_enable = g4x_pre_enable_dp;
6168                 intel_encoder->enable = g4x_enable_dp;
6169                 if (INTEL_INFO(dev)->gen >= 5)
6170                         intel_encoder->post_disable = ilk_post_disable_dp;
6171         }
6172
6173         intel_dig_port->port = port;
6174         intel_dig_port->dp.output_reg = output_reg;
6175
6176         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6177         if (IS_CHERRYVIEW(dev)) {
6178                 if (port == PORT_D)
6179                         intel_encoder->crtc_mask = 1 << 2;
6180                 else
6181                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6182         } else {
6183                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6184         }
6185         intel_encoder->cloneable = 0;
6186
6187         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6188         dev_priv->hotplug.irq_port[port] = intel_dig_port;
6189
6190         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6191                 drm_encoder_cleanup(encoder);
6192                 kfree(intel_dig_port);
6193                 kfree(intel_connector);
6194         }
6195 }
6196
6197 void intel_dp_mst_suspend(struct drm_device *dev)
6198 {
6199         struct drm_i915_private *dev_priv = dev->dev_private;
6200         int i;
6201
6202         /* disable MST */
6203         for (i = 0; i < I915_MAX_PORTS; i++) {
6204                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6205                 if (!intel_dig_port)
6206                         continue;
6207
6208                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6209                         if (!intel_dig_port->dp.can_mst)
6210                                 continue;
6211                         if (intel_dig_port->dp.is_mst)
6212                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6213                 }
6214         }
6215 }
6216
6217 void intel_dp_mst_resume(struct drm_device *dev)
6218 {
6219         struct drm_i915_private *dev_priv = dev->dev_private;
6220         int i;
6221
6222         for (i = 0; i < I915_MAX_PORTS; i++) {
6223                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6224                 if (!intel_dig_port)
6225                         continue;
6226                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6227                         int ret;
6228
6229                         if (!intel_dig_port->dp.can_mst)
6230                                 continue;
6231
6232                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6233                         if (ret != 0) {
6234                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6235                         }
6236                 }
6237         }
6238 }