drm/i915: Fully separate source vs. sink rates
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
/*
 * Associates a DP link bandwidth code with the PLL divider values
 * needed to drive that link rate.
 */
struct dp_link_dpll {
	int link_bw;		/* DP_LINK_BW_* bandwidth code */
	struct dpll dpll;	/* PLL dividers for this rate */
};
48
/* DPLL divider settings per DP link rate for gen4 ports. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
55
/* DPLL divider settings per DP link rate for PCH-attached ports. */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
62
/*
 * DPLL divider settings per DP link rate for VLV.
 * vlv_dpll[0] is also used by vlv_power_sequencer_kick() when it needs
 * to force the pipe PLL on temporarily.
 */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
69
/*
 * CHV supports eDP 1.4, which allows additional link rates.
 * Only the fixed rates are provided below; variable (intermediate)
 * rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires fractional division to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Skylake supports following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Rates matching the standard DP_LINK_BW_1_62/2_7/5_4 bandwidth codes. */
static const int default_rates[] = { 162000, 270000, 540000 };
91
92 /**
93  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94  * @intel_dp: DP struct
95  *
96  * If a CPU or PCH DP output is attached to an eDP panel, this function
97  * will return true, and false otherwise.
98  */
99 static bool is_edp(struct intel_dp *intel_dp)
100 {
101         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
104 }
105
106 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110         return intel_dig_port->base.base.dev;
111 }
112
113 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114 {
115         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
116 }
117
118 static void intel_dp_link_down(struct intel_dp *intel_dp);
119 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
120 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
121 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
122 static void vlv_steal_power_sequencer(struct drm_device *dev,
123                                       enum pipe pipe);
124
125 int
126 intel_dp_max_link_bw(struct intel_dp *intel_dp)
127 {
128         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
129
130         switch (max_link_bw) {
131         case DP_LINK_BW_1_62:
132         case DP_LINK_BW_2_7:
133         case DP_LINK_BW_5_4:
134                 break;
135         default:
136                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137                      max_link_bw);
138                 max_link_bw = DP_LINK_BW_1_62;
139                 break;
140         }
141         return max_link_bw;
142 }
143
144 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145 {
146         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147         struct drm_device *dev = intel_dig_port->base.base.dev;
148         u8 source_max, sink_max;
149
150         source_max = 4;
151         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153                 source_max = 2;
154
155         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157         return min(source_max, sink_max);
158 }
159
160 /*
161  * The units on the numbers in the next two are... bizarre.  Examples will
162  * make it clearer; this one parallels an example in the eDP spec.
163  *
164  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165  *
166  *     270000 * 1 * 8 / 10 == 216000
167  *
168  * The actual data capacity of that configuration is 2.16Gbit/s, so the
169  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
170  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171  * 119000.  At 18bpp that's 2142000 kilobits per second.
172  *
173  * Thus the strange-looking division by 10 in intel_dp_link_required, to
174  * get the result in decakilobits instead of kilobits.
175  */
176
/*
 * Bandwidth (in decakilobits/s, see the comment block above) needed to
 * carry @pixel_clock kHz at @bpp bits per pixel, rounded up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* Round up to whole decakilobits. */
	return (kilobits + 9) / 10;
}
182
/*
 * Usable payload rate of a link: symbol rate times lanes, scaled by the
 * 8b/10b coding overhead (8 data bits per 10 transmitted bits).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total = max_link_clock * max_lanes;

	return total * 8 / 10;
}
188
189 static enum drm_mode_status
190 intel_dp_mode_valid(struct drm_connector *connector,
191                     struct drm_display_mode *mode)
192 {
193         struct intel_dp *intel_dp = intel_attached_dp(connector);
194         struct intel_connector *intel_connector = to_intel_connector(connector);
195         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
196         int target_clock = mode->clock;
197         int max_rate, mode_rate, max_lanes, max_link_clock;
198
199         if (is_edp(intel_dp) && fixed_mode) {
200                 if (mode->hdisplay > fixed_mode->hdisplay)
201                         return MODE_PANEL;
202
203                 if (mode->vdisplay > fixed_mode->vdisplay)
204                         return MODE_PANEL;
205
206                 target_clock = fixed_mode->clock;
207         }
208
209         max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
210         max_lanes = intel_dp_max_lane_count(intel_dp);
211
212         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213         mode_rate = intel_dp_link_required(target_clock, 18);
214
215         if (mode_rate > max_rate)
216                 return MODE_CLOCK_HIGH;
217
218         if (mode->clock < 10000)
219                 return MODE_CLOCK_LOW;
220
221         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222                 return MODE_H_ILLEGAL;
223
224         return MODE_OK;
225 }
226
/*
 * Pack up to the first four bytes of @src into a big-endian u32 as
 * expected by the AUX channel data registers. Extra bytes are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
238
/*
 * Unpack a big-endian AUX data register value into up to four bytes of
 * @dst; the inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
247
248 /* hrawclock is 1/4 the FSB frequency */
249 static int
250 intel_hrawclk(struct drm_device *dev)
251 {
252         struct drm_i915_private *dev_priv = dev->dev_private;
253         uint32_t clkcfg;
254
255         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256         if (IS_VALLEYVIEW(dev))
257                 return 200;
258
259         clkcfg = I915_READ(CLKCFG);
260         switch (clkcfg & CLKCFG_FSB_MASK) {
261         case CLKCFG_FSB_400:
262                 return 100;
263         case CLKCFG_FSB_533:
264                 return 133;
265         case CLKCFG_FSB_667:
266                 return 166;
267         case CLKCFG_FSB_800:
268                 return 200;
269         case CLKCFG_FSB_1067:
270                 return 266;
271         case CLKCFG_FSB_1333:
272                 return 333;
273         /* these two are just a guess; one of them might be right */
274         case CLKCFG_FSB_1600:
275         case CLKCFG_FSB_1600_ALT:
276                 return 400;
277         default:
278                 return 133;
279         }
280 }
281
282 static void
283 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
284                                     struct intel_dp *intel_dp);
285 static void
286 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
287                                               struct intel_dp *intel_dp);
288
/*
 * pps_lock - grab the panel power sequencer lock for @intel_dp's port.
 *
 * Takes a power domain reference for the port *before* acquiring
 * pps_mutex. The ordering matters: per the comment in
 * vlv_power_sequencer_reset(), power domain get/put must never be done
 * while holding pps_mutex. Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
306
/*
 * pps_unlock - release the panel power sequencer lock.
 *
 * Drops pps_mutex first and only then releases the power domain
 * reference taken by pps_lock(), preserving the required ordering
 * (power domain put must not happen under pps_mutex).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
320
/*
 * vlv_power_sequencer_kick - make the power sequencer latch onto this port.
 *
 * The VLV/CHV power sequencer only locks onto a port after the port has
 * been enabled, so briefly enable and then disable the DP port (1 lane,
 * training pattern 1) to force the latch. Requires the port to be
 * currently disabled; the pipe's PLL is force-enabled for the duration
 * if it isn't already running.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
380
/*
 * vlv_power_sequencer_pipe - return (assigning if needed) the PPS pipe
 * for an eDP port.
 *
 * If a power sequencer pipe is already assigned, return it. Otherwise
 * pick a pipe (A or B) whose sequencer is not claimed by another eDP
 * port, take it over via vlv_steal_power_sequencer(), program its
 * registers and kick it so it locks onto this port.
 *
 * Must be called with pps_mutex held; only valid for eDP ports.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
444
445 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446                                enum pipe pipe);
447
448 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449                                enum pipe pipe)
450 {
451         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452 }
453
454 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455                                 enum pipe pipe)
456 {
457         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458 }
459
/*
 * vlv_pipe_check that accepts any pipe; used by
 * vlv_initial_power_sequencer_setup() as the last-resort filter.
 */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
465
466 static enum pipe
467 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468                      enum port port,
469                      vlv_pipe_check pipe_check)
470 {
471         enum pipe pipe;
472
473         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475                         PANEL_PORT_SELECT_MASK;
476
477                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478                         continue;
479
480                 if (!pipe_check(dev_priv, pipe))
481                         continue;
482
483                 return pipe;
484         }
485
486         return INVALID_PIPE;
487 }
488
/*
 * vlv_initial_power_sequencer_setup - adopt PPS state left by the BIOS.
 *
 * Search for a pipe whose power sequencer already selects this port,
 * preferring (in order) one with the panel powered on, then one with
 * VDD forced on, then any pipe selecting the port. If none is found,
 * leave pps_pipe INVALID and let vlv_power_sequencer_pipe() pick one
 * when it's first needed.
 *
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
525
/*
 * vlv_power_sequencer_reset - invalidate every eDP encoder's cached
 * pps_pipe so the power sequencer is re-discovered on next use.
 * VLV-only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
554
555 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556 {
557         struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559         if (HAS_PCH_SPLIT(dev))
560                 return PCH_PP_CONTROL;
561         else
562                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563 }
564
565 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566 {
567         struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569         if (HAS_PCH_SPLIT(dev))
570                 return PCH_PP_STATUS;
571         else
572                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573 }
574
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* Only act on a restart of an eDP port. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Wait out the panel power cycle so T12 is honoured. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
610
611 static bool edp_have_panel_power(struct intel_dp *intel_dp)
612 {
613         struct drm_device *dev = intel_dp_to_dev(intel_dp);
614         struct drm_i915_private *dev_priv = dev->dev_private;
615
616         lockdep_assert_held(&dev_priv->pps_mutex);
617
618         if (IS_VALLEYVIEW(dev) &&
619             intel_dp->pps_pipe == INVALID_PIPE)
620                 return false;
621
622         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
623 }
624
625 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
626 {
627         struct drm_device *dev = intel_dp_to_dev(intel_dp);
628         struct drm_i915_private *dev_priv = dev->dev_private;
629
630         lockdep_assert_held(&dev_priv->pps_mutex);
631
632         if (IS_VALLEYVIEW(dev) &&
633             intel_dp->pps_pipe == INVALID_PIPE)
634                 return false;
635
636         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
637 }
638
639 static void
640 intel_dp_check_edp(struct intel_dp *intel_dp)
641 {
642         struct drm_device *dev = intel_dp_to_dev(intel_dp);
643         struct drm_i915_private *dev_priv = dev->dev_private;
644
645         if (!is_edp(intel_dp))
646                 return;
647
648         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
649                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
651                               I915_READ(_pp_stat_reg(intel_dp)),
652                               I915_READ(_pp_ctrl_reg(intel_dp)));
653         }
654 }
655
/*
 * intel_dp_aux_wait_done - wait for the AUX channel to go idle.
 *
 * Waits for DP_AUX_CH_CTL_SEND_BUSY to clear, either by sleeping on the
 * gmbus wait queue (when AUX interrupts are available) or by atomic
 * polling, with a 10ms timeout in both cases. Returns the last value
 * read from the AUX control register; logs an error if the hardware
 * never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* Note: C re-reads the register (and updates status) on every evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
679
680 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
681 {
682         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683         struct drm_device *dev = intel_dig_port->base.base.dev;
684
685         /*
686          * The clock divider is based off the hrawclk, and would like to run at
687          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
688          */
689         return index ? 0 : intel_hrawclk(dev) / 2;
690 }
691
692 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693 {
694         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695         struct drm_device *dev = intel_dig_port->base.base.dev;
696
697         if (index)
698                 return 0;
699
700         if (intel_dig_port->port == PORT_A) {
701                 if (IS_GEN6(dev) || IS_GEN7(dev))
702                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
703                 else
704                         return 225; /* eDP input clock at 450Mhz */
705         } else {
706                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707         }
708 }
709
710 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711 {
712         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713         struct drm_device *dev = intel_dig_port->base.base.dev;
714         struct drm_i915_private *dev_priv = dev->dev_private;
715
716         if (intel_dig_port->port == PORT_A) {
717                 if (index)
718                         return 0;
719                 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
720         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721                 /* Workaround for non-ULT HSW */
722                 switch (index) {
723                 case 0: return 63;
724                 case 1: return 72;
725                 default: return 0;
726                 }
727         } else  {
728                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
729         }
730 }
731
/* VLV uses a single fixed AUX clock divider value (100); only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
736
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	if (index)
		return 0;

	return 1;
}
746
/*
 * i9xx_get_aux_send_ctl - build the AUX_CH_CTL value for one AUX transfer.
 *
 * Combines the busy/done/error bits, optional interrupt enable, the hw
 * timeout (600us only on BDW's DPA channel, otherwise 400us), message
 * size, precharge time (3 on gen6, else 5) and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
776
/*
 * skl_get_aux_send_ctl - build the AUX_CH_CTL value for one AUX transfer
 * on SKL+. No clock divider or precharge is programmed (the @unused
 * divider parameter exists only to match the vfunc signature); a fixed
 * 1600us hw timeout and a 32-pulse sync are used instead.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
791
/*
 * intel_dp_aux_ch - perform one raw DP AUX channel transaction
 * @intel_dp: port to talk through
 * @send: bytes to transmit (AUX header plus optional payload)
 * @send_bytes: number of bytes in @send (max 20 == 5 data registers)
 * @recv: buffer for the reply
 * @recv_size: capacity of @recv (max 20)
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never went idle/done, -E2BIG for oversized
 * transfers, -EIO on a receive error, -ETIMEDOUT when the sink did not
 * answer (typically nothing connected).  Takes pps_lock and, for eDP,
 * keeps VDD up for the duration of the transfer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow AUX_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at each AUX clock divider the platform
	 * provides; get_aux_clock_divider() returning 0 ends the list. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	/* Clamp to the caller's buffer, whatever the hardware reports */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
931
932 #define BARE_ADDRESS_SIZE       3
933 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
934 static ssize_t
935 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
936 {
937         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938         uint8_t txbuf[20], rxbuf[20];
939         size_t txsize, rxsize;
940         int ret;
941
942         txbuf[0] = msg->request << 4;
943         txbuf[1] = msg->address >> 8;
944         txbuf[2] = msg->address & 0xff;
945         txbuf[3] = msg->size - 1;
946
947         switch (msg->request & ~DP_AUX_I2C_MOT) {
948         case DP_AUX_NATIVE_WRITE:
949         case DP_AUX_I2C_WRITE:
950                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
951                 rxsize = 1;
952
953                 if (WARN_ON(txsize > 20))
954                         return -E2BIG;
955
956                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
957
958                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959                 if (ret > 0) {
960                         msg->reply = rxbuf[0] >> 4;
961
962                         /* Return payload size. */
963                         ret = msg->size;
964                 }
965                 break;
966
967         case DP_AUX_NATIVE_READ:
968         case DP_AUX_I2C_READ:
969                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
970                 rxsize = msg->size + 1;
971
972                 if (WARN_ON(rxsize > 20))
973                         return -E2BIG;
974
975                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976                 if (ret > 0) {
977                         msg->reply = rxbuf[0] >> 4;
978                         /*
979                          * Assume happy day, and copy the data. The caller is
980                          * expected to check msg->reply before touching it.
981                          *
982                          * Return payload size.
983                          */
984                         ret--;
985                         memcpy(msg->buffer, rxbuf + 1, ret);
986                 }
987                 break;
988
989         default:
990                 ret = -EINVAL;
991                 break;
992         }
993
994         return ret;
995 }
996
/*
 * Set up the AUX channel control register for this port and register
 * the drm_dp_aux (DDC) adapter, plus a sysfs link from the connector's
 * device to the i2c device.  BUG()s on an unknown port.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* Default per-port AUX_CTL register; may be overridden below */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Link the connector's sysfs dir to the i2c adapter; undone in
	 * intel_dp_connector_unregister(). */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1061
/*
 * Connector unregister hook: remove the DDC sysfs link created in
 * intel_dp_aux_init() (MST connectors never get one), then perform the
 * common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1072
/*
 * Configure pipe_config to drive eDP from SKL_DPLL0 at @link_clock
 * (kHz).  The switch matches on link_clock / 2 — presumably DPLL_CTRL1
 * encodes the rate as half the link clock; confirm against the bspec.
 *
 * NOTE(review): there is no default case, so an unsupported rate leaves
 * ctrl1 with only the OVERRIDE bit set — verify callers only pass the
 * rates listed here.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:	/* 162000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:	/* 270000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:	/* 540000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:	/* 324000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:	/* 216000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:	/* 432000 kHz link */
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1115
1116 static void
1117 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1118 {
1119         switch (link_bw) {
1120         case DP_LINK_BW_1_62:
1121                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122                 break;
1123         case DP_LINK_BW_2_7:
1124                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125                 break;
1126         case DP_LINK_BW_5_4:
1127                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128                 break;
1129         }
1130 }
1131
1132 static int
1133 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1134 {
1135         if (intel_dp->num_supported_rates) {
1136                 *sink_rates = intel_dp->supported_rates;
1137                 return intel_dp->num_supported_rates;
1138         }
1139
1140         *sink_rates = default_rates;
1141
1142         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1143 }
1144
1145 static int
1146 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1147 {
1148         if (INTEL_INFO(dev)->gen >= 9) {
1149                 *source_rates = gen9_rates;
1150                 return ARRAY_SIZE(gen9_rates);
1151         }
1152
1153         *source_rates = default_rates;
1154
1155         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156                 /* WaDisableHBR2:skl */
1157                 return (DP_LINK_BW_2_7 >> 3) + 1;
1158         else if (INTEL_INFO(dev)->gen >= 8 ||
1159             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160                 return (DP_LINK_BW_5_4 >> 3) + 1;
1161         else
1162                 return (DP_LINK_BW_2_7 >> 3) + 1;
1163 }
1164
1165 static void
1166 intel_dp_set_clock(struct intel_encoder *encoder,
1167                    struct intel_crtc_state *pipe_config, int link_bw)
1168 {
1169         struct drm_device *dev = encoder->base.dev;
1170         const struct dp_link_dpll *divisor = NULL;
1171         int i, count = 0;
1172
1173         if (IS_G4X(dev)) {
1174                 divisor = gen4_dpll;
1175                 count = ARRAY_SIZE(gen4_dpll);
1176         } else if (HAS_PCH_SPLIT(dev)) {
1177                 divisor = pch_dpll;
1178                 count = ARRAY_SIZE(pch_dpll);
1179         } else if (IS_CHERRYVIEW(dev)) {
1180                 divisor = chv_dpll;
1181                 count = ARRAY_SIZE(chv_dpll);
1182         } else if (IS_VALLEYVIEW(dev)) {
1183                 divisor = vlv_dpll;
1184                 count = ARRAY_SIZE(vlv_dpll);
1185         }
1186
1187         if (divisor && count) {
1188                 for (i = 0; i < count; i++) {
1189                         if (link_bw == divisor[i].link_bw) {
1190                                 pipe_config->dpll = divisor[i].dpll;
1191                                 pipe_config->clock_set = true;
1192                                 break;
1193                         }
1194                 }
1195         }
1196 }
1197
/*
 * Intersect two ascending-sorted rate tables into @supported_rates and
 * return the number of common entries (classic two-pointer merge:
 * advance whichever side currently holds the smaller rate).
 */
static int intel_supported_rates(const int *source_rates, int source_len,
				 const int *sink_rates, int sink_len,
				 int *supported_rates)
{
	int src = 0, snk = 0, count = 0;

	while (src < source_len && snk < sink_len) {
		int s = source_rates[src];
		int k = sink_rates[snk];

		if (s == k) {
			supported_rates[count++] = s;
			src++;
			snk++;
		} else if (s < k) {
			src++;
		} else {
			snk++;
		}
	}

	return count;
}
1218
/*
 * Return the index of @find in @rates, or DP_MAX_SUPPORTED_RATES if it
 * is not present.
 *
 * NOTE(review): this always scans DP_MAX_SUPPORTED_RATES entries, so
 * @rates must point to an array at least that large; passing a shorter
 * table (e.g. default_rates) would read out of bounds.  The only caller
 * passes the sink-advertised table — confirm its declared size.
 */
static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}
1229
/*
 * intel_dp_compute_config - compute the DP link configuration for a mode
 *
 * Intersects the source- and sink-supported link rates, then searches
 * (highest bpp first; within a bpp, lowest link clock and lane count
 * first) for a combination that carries the mode's data rate.  On
 * success fills in lane count, link bw / rate select, pipe bpp, port
 * clock, m/n values and the platform PLL selection, and returns true.
 * Returns false if no combination fits or the mode is double-clocked.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	const int *sink_rates;
	int supported_rates[8] = {0};
	const int *source_rates;
	int source_len, sink_len, supported_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	source_len = intel_dp_source_rates(dev, &source_rates);

	/* supported_rates = rates both ends can do, ascending */
	supported_len = intel_supported_rates(source_rates, source_len,
				sink_rates, sink_len, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Highest bpp first; within a bpp, cheapest link that fits wins */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	intel_dp->link_bw =
		drm_dp_link_rate_to_bw_code(supported_rates[clock]);

	/* Sinks with an explicit rate table (gen9+) are driven via the
	 * rate-select index instead of a link bw code (link_bw = 0). */
	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock], sink_rates);
		intel_dp->link_bw = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m/n values for seamless downclock (DRRS) */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1394
/*
 * Program the CPU eDP PLL frequency select in DP_A to match the port
 * clock already computed into crtc->config.  Only the 162 MHz and
 * 270 MHz link rates are representable here; anything else falls into
 * the 270 MHz branch.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* flush the write, then give the PLL time to react */
	udelay(500);
}
1425
/*
 * Assemble the DP port control register value in intel_dp->DP for the
 * current crtc config.  Only reads the hardware (to preserve the
 * DETECTED bit); the computed value is written to the port register
 * elsewhere.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU port A layout */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: most bits live in TRANS_DP_CTL instead */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1500
1501 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1502 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1503
1504 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1505 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1506
1507 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1508 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1509
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error on timeout (5000 per _wait_for() — presumably ms;
 * confirm against the macro).  Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1536
/* Block until the panel power sequencer reports fully-on idle state. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1542
/* Block until the panel power sequencer reports the panel is off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1548
/*
 * Wait out the panel's minimum power-cycle delay (counted from the
 * last power-off) and then for the sequencer to reach off-idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1560
/* Enforce the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1566
/* Enforce the panel's backlight-off settle delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1572
1573 /* Read the current pp_control value, unlocking the register if it
1574  * is locked
1575  */
1576
1577 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1578 {
1579         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1580         struct drm_i915_private *dev_priv = dev->dev_private;
1581         u32 control;
1582
1583         lockdep_assert_held(&dev_priv->pps_mutex);
1584
1585         control = I915_READ(_pp_ctrl_reg(intel_dp));
1586         control &= ~PANEL_UNLOCK_MASK;
1587         control |= PANEL_UNLOCK_REGS;
1588         return control;
1589 }
1590
1591 /*
1592  * Must be paired with edp_panel_vdd_off().
1593  * Must hold pps_mutex around the whole on/off sequence.
1594  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true iff VDD was not already requested before this call, i.e.
 * the caller owes a balancing vdd off.  No-op (returns false) on non-eDP.
1595  */
1596 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1597 {
1598         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1599         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1600         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1601         struct drm_i915_private *dev_priv = dev->dev_private;
1602         enum intel_display_power_domain power_domain;
1603         u32 pp;
1604         u32 pp_stat_reg, pp_ctrl_reg;
1605         bool need_to_disable = !intel_dp->want_panel_vdd;
1606
1607         lockdep_assert_held(&dev_priv->pps_mutex);
1608
1609         if (!is_edp(intel_dp))
1610                 return false;
1611
             /* Keep VDD up: cancel any previously scheduled deferred off. */
1612         cancel_delayed_work(&intel_dp->panel_vdd_work);
1613         intel_dp->want_panel_vdd = true;
1614
             /* HW already has VDD forced on; the power reference is held. */
1615         if (edp_have_panel_vdd(intel_dp))
1616                 return need_to_disable;
1617
             /* Released again in edp_panel_vdd_off_sync(). */
1618         power_domain = intel_display_port_power_domain(intel_encoder);
1619         intel_display_power_get(dev_priv, power_domain);
1620
1621         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1622                       port_name(intel_dig_port->port));
1623
1624         if (!edp_have_panel_power(intel_dp))
1625                 wait_panel_power_cycle(intel_dp);
1626
1627         pp = ironlake_get_pp_control(intel_dp);
1628         pp |= EDP_FORCE_VDD;
1629
1630         pp_stat_reg = _pp_stat_reg(intel_dp);
1631         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1632
1633         I915_WRITE(pp_ctrl_reg, pp);
1634         POSTING_READ(pp_ctrl_reg);
1635         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1636                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1637         /*
1638          * If the panel wasn't on, delay before accessing aux channel
1639          */
1640         if (!edp_have_panel_power(intel_dp)) {
1641                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1642                               port_name(intel_dig_port->port));
1643                 msleep(intel_dp->panel_power_up_delay);
1644         }
1645
1646         return need_to_disable;
1647 }
1648
1649 /*
1650  * Must be paired with intel_edp_panel_vdd_off() or
1651  * intel_edp_panel_off().
1652  * Nested calls to these functions are not allowed since
1653  * we drop the lock. Caller must use some higher level
1654  * locking to prevent nested calls from other threads.
 *
 * Public, self-locking wrapper around edp_panel_vdd_on(); warns if VDD
 * was already requested (i.e. this would be a nested/unbalanced on).
1655  */
1656 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1657 {
1658         bool vdd;
1659
1660         if (!is_edp(intel_dp))
1661                 return;
1662
1663         pps_lock(intel_dp);
1664         vdd = edp_panel_vdd_on(intel_dp);
1665         pps_unlock(intel_dp);
1666
1667         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1668              port_name(dp_to_dig_port(intel_dp)->port));
1669 }
1670
/*
 * Immediately turn off the VDD override and drop the matching power-domain
 * reference taken in edp_panel_vdd_on().  Must hold pps_mutex and must only
 * be called once no-one wants VDD anymore (want_panel_vdd == false).
 */
1671 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1672 {
1673         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1674         struct drm_i915_private *dev_priv = dev->dev_private;
1675         struct intel_digital_port *intel_dig_port =
1676                 dp_to_dig_port(intel_dp);
1677         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1678         enum intel_display_power_domain power_domain;
1679         u32 pp;
1680         u32 pp_stat_reg, pp_ctrl_reg;
1681
1682         lockdep_assert_held(&dev_priv->pps_mutex);
1683
1684         WARN_ON(intel_dp->want_panel_vdd);
1685
             /* Nothing to do if HW doesn't have VDD forced on. */
1686         if (!edp_have_panel_vdd(intel_dp))
1687                 return;
1688
1689         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1690                       port_name(intel_dig_port->port));
1691
1692         pp = ironlake_get_pp_control(intel_dp);
1693         pp &= ~EDP_FORCE_VDD;
1694
1695         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1696         pp_stat_reg = _pp_stat_reg(intel_dp);
1697
1698         I915_WRITE(pp_ctrl_reg, pp);
1699         POSTING_READ(pp_ctrl_reg);
1700
1701         /* Make sure sequencer is idle before allowing subsequent activity */
1702         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1703         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1704
             /*
              * Dropping VDD with the panel already off starts a power cycle,
              * so record the timestamp for wait_panel_power_cycle().
              */
1705         if ((pp & POWER_TARGET_ON) == 0)
1706                 intel_dp->last_power_cycle = jiffies;
1707
             /* Balances the intel_display_power_get() in edp_panel_vdd_on(). */
1708         power_domain = intel_display_port_power_domain(intel_encoder);
1709         intel_display_power_put(dev_priv, power_domain);
1710 }
1711
/*
 * Deferred VDD-off handler scheduled by edp_panel_vdd_schedule_off().
 * Re-checks want_panel_vdd under pps_mutex since VDD may have been
 * re-requested between scheduling and execution.
 */
1712 static void edp_panel_vdd_work(struct work_struct *__work)
1713 {
1714         struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1715                                                  struct intel_dp, panel_vdd_work);
1716
1717         pps_lock(intel_dp);
1718         if (!intel_dp->want_panel_vdd)
1719                 edp_panel_vdd_off_sync(intel_dp);
1720         pps_unlock(intel_dp);
1721 }
1722
/* Schedule the deferred VDD off (edp_panel_vdd_work) far in the future. */
1723 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1724 {
1725         unsigned long delay;
1726
1727         /*
1728          * Queue the timer to fire a long time from now (relative to the power
1729          * down delay) to keep the panel power up across a sequence of
1730          * operations.
1731          */
1732         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1733         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1734 }
1735
1736 /*
1737  * Must be paired with edp_panel_vdd_on().
1738  * Must hold pps_mutex around the whole on/off sequence.
1739  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: true turns VDD off immediately; false defers it via delayed work
 * so VDD stays up across a burst of AUX/panel operations.
1740  */
1741 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1742 {
1743         struct drm_i915_private *dev_priv =
1744                 intel_dp_to_dev(intel_dp)->dev_private;
1745
1746         lockdep_assert_held(&dev_priv->pps_mutex);
1747
1748         if (!is_edp(intel_dp))
1749                 return;
1750
1751         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1752              port_name(dp_to_dig_port(intel_dp)->port));
1753
1754         intel_dp->want_panel_vdd = false;
1755
1756         if (sync)
1757                 edp_panel_vdd_off_sync(intel_dp);
1758         else
1759                 edp_panel_vdd_schedule_off(intel_dp);
1760 }
1761
/*
 * Turn the eDP panel power on via the power sequencer and wait until the
 * panel reports on.  Must hold pps_mutex.  No-op if the panel is already
 * powered (with a WARN, since that indicates unbalanced sequencing).
 */
1762 static void edp_panel_on(struct intel_dp *intel_dp)
1763 {
1764         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1765         struct drm_i915_private *dev_priv = dev->dev_private;
1766         u32 pp;
1767         u32 pp_ctrl_reg;
1768
1769         lockdep_assert_held(&dev_priv->pps_mutex);
1770
1771         if (!is_edp(intel_dp))
1772                 return;
1773
1774         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1775                       port_name(dp_to_dig_port(intel_dp)->port));
1776
1777         if (WARN(edp_have_panel_power(intel_dp),
1778                  "eDP port %c panel power already on\n",
1779                  port_name(dp_to_dig_port(intel_dp)->port)))
1780                 return;
1781
             /* Respect the mandatory off -> on power cycle delay first. */
1782         wait_panel_power_cycle(intel_dp);
1783
1784         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1785         pp = ironlake_get_pp_control(intel_dp);
1786         if (IS_GEN5(dev)) {
1787                 /* ILK workaround: disable reset around power sequence */
1788                 pp &= ~PANEL_POWER_RESET;
1789                 I915_WRITE(pp_ctrl_reg, pp);
1790                 POSTING_READ(pp_ctrl_reg);
1791         }
1792
1793         pp |= POWER_TARGET_ON;
1794         if (!IS_GEN5(dev))
1795                 pp |= PANEL_POWER_RESET;
1796
1797         I915_WRITE(pp_ctrl_reg, pp);
1798         POSTING_READ(pp_ctrl_reg);
1799
1800         wait_panel_on(intel_dp);
             /* Timestamp used by wait_backlight_on() for the T8 delay. */
1801         intel_dp->last_power_on = jiffies;
1802
1803         if (IS_GEN5(dev)) {
1804                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1805                 I915_WRITE(pp_ctrl_reg, pp);
1806                 POSTING_READ(pp_ctrl_reg);
1807         }
1808 }
1809
/* Public, self-locking wrapper around edp_panel_on(). */
1810 void intel_edp_panel_on(struct intel_dp *intel_dp)
1811 {
1812         if (!is_edp(intel_dp))
1813                 return;
1814
1815         pps_lock(intel_dp);
1816         edp_panel_on(intel_dp);
1817         pps_unlock(intel_dp);
1818 }
1819
1820
/*
 * Turn the eDP panel power off (also dropping the VDD override in the same
 * register write) and wait for the sequencer to report off.  Must hold
 * pps_mutex, and the caller must have VDD forced on (see WARN below) —
 * the power-domain reference taken for VDD is released here instead of
 * in edp_panel_vdd_off_sync().
 */
1821 static void edp_panel_off(struct intel_dp *intel_dp)
1822 {
1823         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1824         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1825         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1826         struct drm_i915_private *dev_priv = dev->dev_private;
1827         enum intel_display_power_domain power_domain;
1828         u32 pp;
1829         u32 pp_ctrl_reg;
1830
1831         lockdep_assert_held(&dev_priv->pps_mutex);
1832
1833         if (!is_edp(intel_dp))
1834                 return;
1835
1836         DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1837                       port_name(dp_to_dig_port(intel_dp)->port));
1838
1839         WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1840              port_name(dp_to_dig_port(intel_dp)->port));
1841
1842         pp = ironlake_get_pp_control(intel_dp);
1843         /* We need to switch off panel power _and_ force vdd, for otherwise some
1844          * panels get very unhappy and cease to work. */
1845         pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1846                 EDP_BLC_ENABLE);
1847
1848         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1849
1850         intel_dp->want_panel_vdd = false;
1851
1852         I915_WRITE(pp_ctrl_reg, pp);
1853         POSTING_READ(pp_ctrl_reg);
1854
             /* Timestamp for the mandatory power-cycle delay on the next on. */
1855         intel_dp->last_power_cycle = jiffies;
1856         wait_panel_off(intel_dp);
1857
1858         /* We got a reference when we enabled the VDD. */
1859         power_domain = intel_display_port_power_domain(intel_encoder);
1860         intel_display_power_put(dev_priv, power_domain);
1861 }
1862
/* Public, self-locking wrapper around edp_panel_off(). */
1863 void intel_edp_panel_off(struct intel_dp *intel_dp)
1864 {
1865         if (!is_edp(intel_dp))
1866                 return;
1867
1868         pps_lock(intel_dp);
1869         edp_panel_off(intel_dp);
1870         pps_unlock(intel_dp);
1871 }
1872
1873 /* Enable backlight in the panel power control. */
/* Sleeps for the panel-on -> backlight-on delay; takes pps_mutex itself. */
1874 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1875 {
1876         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1877         struct drm_device *dev = intel_dig_port->base.base.dev;
1878         struct drm_i915_private *dev_priv = dev->dev_private;
1879         u32 pp;
1880         u32 pp_ctrl_reg;
1881
1882         /*
1883          * If we enable the backlight right away following a panel power
1884          * on, we may see slight flicker as the panel syncs with the eDP
1885          * link.  So delay a bit to make sure the image is solid before
1886          * allowing it to appear.
1887          */
1888         wait_backlight_on(intel_dp);
1889
1890         pps_lock(intel_dp);
1891
1892         pp = ironlake_get_pp_control(intel_dp);
1893         pp |= EDP_BLC_ENABLE;
1894
1895         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1896
1897         I915_WRITE(pp_ctrl_reg, pp);
1898         POSTING_READ(pp_ctrl_reg);
1899
1900         pps_unlock(intel_dp);
1901 }
1902
1903 /* Enable backlight PWM and backlight PP control. */
/* Order matters: PWM is brought up before the PP backlight enable bit. */
1904 void intel_edp_backlight_on(struct intel_dp *intel_dp)
1905 {
1906         if (!is_edp(intel_dp))
1907                 return;
1908
1909         DRM_DEBUG_KMS("\n");
1910
1911         intel_panel_enable_backlight(intel_dp->attached_connector);
1912         _intel_edp_backlight_on(intel_dp);
1913 }
1914
1915 /* Disable backlight in the panel power control. */
/* Takes pps_mutex itself; records the off timestamp and waits the off delay. */
1916 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1917 {
1918         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1919         struct drm_i915_private *dev_priv = dev->dev_private;
1920         u32 pp;
1921         u32 pp_ctrl_reg;
1922
1923         if (!is_edp(intel_dp))
1924                 return;
1925
1926         pps_lock(intel_dp);
1927
1928         pp = ironlake_get_pp_control(intel_dp);
1929         pp &= ~EDP_BLC_ENABLE;
1930
1931         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1932
1933         I915_WRITE(pp_ctrl_reg, pp);
1934         POSTING_READ(pp_ctrl_reg);
1935
1936         pps_unlock(intel_dp);
1937
1938         intel_dp->last_backlight_off = jiffies;
1939         edp_wait_backlight_off(intel_dp);
1940 }
1941
1942 /* Disable backlight PP control and backlight PWM. */
/* Mirror image of intel_edp_backlight_on(): PP bit first, then PWM. */
1943 void intel_edp_backlight_off(struct intel_dp *intel_dp)
1944 {
1945         if (!is_edp(intel_dp))
1946                 return;
1947
1948         DRM_DEBUG_KMS("\n");
1949
1950         _intel_edp_backlight_off(intel_dp);
1951         intel_panel_disable_backlight(intel_dp->attached_connector);
1952 }
1953
1954 /*
1955  * Hook for controlling the panel power control backlight through the bl_power
1956  * sysfs attribute. Take care to handle multiple calls.
1957  */
1958 static void intel_edp_backlight_power(struct intel_connector *connector,
1959                                       bool enable)
1960 {
1961         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
1962         bool is_enabled;
1963
             /* Read current HW state to make repeated calls idempotent. */
1964         pps_lock(intel_dp);
1965         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1966         pps_unlock(intel_dp);
1967
1968         if (is_enabled == enable)
1969                 return;
1970
1971         DRM_DEBUG_KMS("panel power control backlight %s\n",
1972                       enable ? "enable" : "disable");
1973
1974         if (enable)
1975                 _intel_edp_backlight_on(intel_dp);
1976         else
1977                 _intel_edp_backlight_off(intel_dp);
1978 }
1979
/*
 * Enable the CPU eDP PLL (port A).  The pipe and port must both be off;
 * the 200 us delay gives the PLL time to lock before the port is enabled.
 */
1980 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1981 {
1982         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1983         struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1984         struct drm_device *dev = crtc->dev;
1985         struct drm_i915_private *dev_priv = dev->dev_private;
1986         u32 dpa_ctl;
1987
1988         assert_pipe_disabled(dev_priv,
1989                              to_intel_crtc(crtc)->pipe);
1990
1991         DRM_DEBUG_KMS("\n");
1992         dpa_ctl = I915_READ(DP_A);
1993         WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1994         WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1995
1996         /* We don't adjust intel_dp->DP while tearing down the link, to
1997          * facilitate link retraining (e.g. after hotplug). Hence clear all
1998          * enable bits here to ensure that we don't enable too much. */
1999         intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2000         intel_dp->DP |= DP_PLL_ENABLE;
2001         I915_WRITE(DP_A, intel_dp->DP);
2002         POSTING_READ(DP_A);
2003         udelay(200);
2004 }
2005
/*
 * Disable the CPU eDP PLL (port A).  Works on the live register value
 * rather than the cached intel_dp->DP (see comment below).
 */
2006 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2007 {
2008         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2009         struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2010         struct drm_device *dev = crtc->dev;
2011         struct drm_i915_private *dev_priv = dev->dev_private;
2012         u32 dpa_ctl;
2013
2014         assert_pipe_disabled(dev_priv,
2015                              to_intel_crtc(crtc)->pipe);
2016
2017         dpa_ctl = I915_READ(DP_A);
2018         WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2019              "dp pll off, should be on\n");
2020         WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2021
2022         /* We can't rely on the value tracked for the DP register in
2023          * intel_dp->DP because link_down must not change that (otherwise link
2024          * re-training will fail. */
2025         dpa_ctl &= ~DP_PLL_ENABLE;
2026         I915_WRITE(DP_A, dpa_ctl);
2027         POSTING_READ(DP_A);
2028         udelay(200);
2029 }
2030
2031 /* If the sink supports it, try to set the power state appropriately */
/*
 * Write DP_SET_POWER over AUX.  @mode is a DRM_MODE_DPMS_* value: DPMS_ON
 * maps to D0, everything else to D3.  Requires DPCD rev 1.1+ (SET_POWER
 * did not exist before that).  Failure is only logged, not returned.
 */
2032 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2033 {
2034         int ret, i;
2035
2036         /* Should have a valid DPCD by this point */
2037         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2038                 return;
2039
2040         if (mode != DRM_MODE_DPMS_ON) {
2041                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2042                                          DP_SET_POWER_D3);
2043         } else {
2044                 /*
2045                  * When turning on, retry the write up to 3 times, sleeping
2046                  * 1 ms between attempts, to give the sink time to wake up.
2047                  */
2048                 for (i = 0; i < 3; i++) {
2049                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2050                                                  DP_SET_POWER_D0);
2051                         if (ret == 1)
2052                                 break;
2053                         msleep(1);
2054                 }
2055         }
2056
             /* drm_dp_dpcd_writeb() returns the number of bytes written (1). */
2057         if (ret != 1)
2058                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2059                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2060 }
2061
/*
 * Read back whether this DP port is enabled in hardware and, if so, which
 * pipe feeds it (*pipe).  Returns false when the power domain or port is
 * off.  Pipe lookup differs per platform: gen7 port A and CHV encode it in
 * the port register; CPT PCH ports must be found via TRANS_DP_CTL instead.
 */
2062 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2063                                   enum pipe *pipe)
2064 {
2065         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2066         enum port port = dp_to_dig_port(intel_dp)->port;
2067         struct drm_device *dev = encoder->base.dev;
2068         struct drm_i915_private *dev_priv = dev->dev_private;
2069         enum intel_display_power_domain power_domain;
2070         u32 tmp;
2071
2072         power_domain = intel_display_port_power_domain(encoder);
2073         if (!intel_display_power_is_enabled(dev_priv, power_domain))
2074                 return false;
2075
2076         tmp = I915_READ(intel_dp->output_reg);
2077
2078         if (!(tmp & DP_PORT_EN))
2079                 return false;
2080
2081         if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2082                 *pipe = PORT_TO_PIPE_CPT(tmp);
2083         } else if (IS_CHERRYVIEW(dev)) {
2084                 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2085         } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2086                 *pipe = PORT_TO_PIPE(tmp);
2087         } else {
                     /* CPT: scan each transcoder for one selecting this port. */
2088                 u32 trans_sel;
2089                 u32 trans_dp;
2090                 int i;
2091
2092                 switch (intel_dp->output_reg) {
2093                 case PCH_DP_B:
2094                         trans_sel = TRANS_DP_PORT_SEL_B;
2095                         break;
2096                 case PCH_DP_C:
2097                         trans_sel = TRANS_DP_PORT_SEL_C;
2098                         break;
2099                 case PCH_DP_D:
2100                         trans_sel = TRANS_DP_PORT_SEL_D;
2101                         break;
2102                 default:
                             /* Port is on but not a known PCH DP reg; *pipe unset. */
2103                         return true;
2104                 }
2105
2106                 for_each_pipe(dev_priv, i) {
2107                         trans_dp = I915_READ(TRANS_DP_CTL(i));
2108                         if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2109                                 *pipe = i;
2110                                 return true;
2111                         }
2112                 }
2113
2114                 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2115                               intel_dp->output_reg);
2116         }
2117
2118         return true;
2119 }
2120
/*
 * Fill in @pipe_config from the current hardware state: sync polarity
 * flags, audio enable, color range, M/N values, port clock and dot clock.
 * Used for state readout/cross-checking.
 */
2121 static void intel_dp_get_config(struct intel_encoder *encoder,
2122                                 struct intel_crtc_state *pipe_config)
2123 {
2124         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2125         u32 tmp, flags = 0;
2126         struct drm_device *dev = encoder->base.dev;
2127         struct drm_i915_private *dev_priv = dev->dev_private;
2128         enum port port = dp_to_dig_port(intel_dp)->port;
2129         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2130         int dotclock;
2131
2132         tmp = I915_READ(intel_dp->output_reg);
2133         if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2134                 pipe_config->has_audio = true;
2135
             /* Sync polarity lives in the port reg, except on CPT transcoders. */
2136         if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2137                 if (tmp & DP_SYNC_HS_HIGH)
2138                         flags |= DRM_MODE_FLAG_PHSYNC;
2139                 else
2140                         flags |= DRM_MODE_FLAG_NHSYNC;
2141
2142                 if (tmp & DP_SYNC_VS_HIGH)
2143                         flags |= DRM_MODE_FLAG_PVSYNC;
2144                 else
2145                         flags |= DRM_MODE_FLAG_NVSYNC;
2146         } else {
2147                 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2148                 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2149                         flags |= DRM_MODE_FLAG_PHSYNC;
2150                 else
2151                         flags |= DRM_MODE_FLAG_NHSYNC;
2152
2153                 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2154                         flags |= DRM_MODE_FLAG_PVSYNC;
2155                 else
2156                         flags |= DRM_MODE_FLAG_NVSYNC;
2157         }
2158
2159         pipe_config->base.adjusted_mode.flags |= flags;
2160
2161         if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2162             tmp & DP_COLOR_RANGE_16_235)
2163                 pipe_config->limited_color_range = true;
2164
2165         pipe_config->has_dp_encoder = true;
2166
2167         intel_dp_get_m_n(crtc, pipe_config);
2168
             /* CPU eDP (port A): link rate comes from the DP_A PLL frequency. */
2169         if (port == PORT_A) {
2170                 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2171                         pipe_config->port_clock = 162000;
2172                 else
2173                         pipe_config->port_clock = 270000;
2174         }
2175
2176         dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2177                                             &pipe_config->dp_m_n);
2178
2179         if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2180                 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2181
2182         pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2183
2184         if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2185             pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2186                 /*
2187                  * This is a big fat ugly hack.
2188                  *
2189                  * Some machines in UEFI boot mode provide us a VBT that has 18
2190                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2191                  * unknown we fail to light up. Yet the same BIOS boots up with
2192                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2193                  * max, not what it tells us to use.
2194                  *
2195                  * Note: This will still be broken if the eDP panel is not lit
2196                  * up by the BIOS, and thus we can't get the mode at module
2197                  * load.
2198                  */
2199                 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2200                               pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2201                 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2202         }
2203 }
2204
/*
 * Encoder .disable hook: audio off, PSR off (non-DDI), then the ordered
 * panel shutdown (backlight -> sink D3 -> panel power), holding VDD
 * across the sequence.  On gen4 the port itself is taken down here; on
 * ilk+ that happens in the post_disable hooks after the pipe is off.
 */
2205 static void intel_disable_dp(struct intel_encoder *encoder)
2206 {
2207         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2208         struct drm_device *dev = encoder->base.dev;
2209         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2210
2211         if (crtc->config->has_audio)
2212                 intel_audio_codec_disable(encoder);
2213
2214         if (HAS_PSR(dev) && !HAS_DDI(dev))
2215                 intel_psr_disable(intel_dp);
2216
2217         /* Make sure the panel is off before trying to change the mode. But also
2218          * ensure that we have vdd while we switch off the panel. */
2219         intel_edp_panel_vdd_on(intel_dp);
2220         intel_edp_backlight_off(intel_dp);
2221         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2222         intel_edp_panel_off(intel_dp);
2223
2224         /* disable the port before the pipe on g4x */
2225         if (INTEL_INFO(dev)->gen < 5)
2226                 intel_dp_link_down(intel_dp);
2227 }
2228
/* ILK+ post-disable: drop the link, and the CPU eDP PLL for port A. */
2229 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2230 {
2231         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2232         enum port port = dp_to_dig_port(intel_dp)->port;
2233
2234         intel_dp_link_down(intel_dp);
             /* Only ilk+ CPU eDP (port A) uses the dedicated eDP PLL. */
2235         if (port == PORT_A)
2236                 ironlake_edp_pll_off(intel_dp);
2237 }
2238
/* VLV post-disable: just take the link down after the pipe is off. */
2239 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2240 {
2241         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2242
2243         intel_dp_link_down(intel_dp);
2244 }
2245
/*
 * CHV post-disable: take the link down, then assert the PHY data-lane
 * resets for both PCS groups of this channel via DPIO.
 */
2246 static void chv_post_disable_dp(struct intel_encoder *encoder)
2247 {
2248         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2249         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2250         struct drm_device *dev = encoder->base.dev;
2251         struct drm_i915_private *dev_priv = dev->dev_private;
2252         struct intel_crtc *intel_crtc =
2253                 to_intel_crtc(encoder->base.crtc);
2254         enum dpio_channel ch = vlv_dport_to_channel(dport);
2255         enum pipe pipe = intel_crtc->pipe;
2256         u32 val;
2257
2258         intel_dp_link_down(intel_dp);
2259
2260         mutex_lock(&dev_priv->dpio_lock);
2261
2262         /* Propagate soft reset to data lane reset */
2263         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2264         val |= CHV_PCS_REQ_SOFTRESET_EN;
2265         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2266
2267         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2268         val |= CHV_PCS_REQ_SOFTRESET_EN;
2269         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2270
             /* Clearing the LANE*_RESET bits asserts the lane resets. */
2271         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2272         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2273         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2274
2275         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2276         val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2277         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2278
2279         mutex_unlock(&dev_priv->dpio_lock);
2280 }
2281
/*
 * Program the requested link training pattern (@dp_train_pat, a
 * DP_TRAINING_PATTERN_* value possibly ORed with
 * DP_LINK_SCRAMBLING_DISABLE) into the port register image *@DP.
 *
 * Three hardware generations are handled:
 *  - DDI (HSW+): pattern goes in DP_TP_CTL, written here directly;
 *  - CPT PCH ports (and gen7 port A): *_CPT bit layout in *DP;
 *  - g4x/VLV/CHV: legacy bit layout in *DP (CHV adds pattern 3).
 * Except for the DDI path, the caller is responsible for writing *DP to
 * the port register.  Pattern 3 is unsupported on CPT and non-CHV legacy
 * hardware and falls back to pattern 2 with an error message.
 */
2282 static void
2283 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2284                          uint32_t *DP,
2285                          uint8_t dp_train_pat)
2286 {
2287         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2288         struct drm_device *dev = intel_dig_port->base.base.dev;
2289         struct drm_i915_private *dev_priv = dev->dev_private;
2290         enum port port = intel_dig_port->port;
2291
2292         if (HAS_DDI(dev)) {
2293                 uint32_t temp = I915_READ(DP_TP_CTL(port));
2294
2295                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2296                         temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2297                 else
2298                         temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2299
2300                 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2301                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2302                 case DP_TRAINING_PATTERN_DISABLE:
2303                         temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2304
2305                         break;
2306                 case DP_TRAINING_PATTERN_1:
2307                         temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2308                         break;
2309                 case DP_TRAINING_PATTERN_2:
2310                         temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2311                         break;
2312                 case DP_TRAINING_PATTERN_3:
2313                         temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2314                         break;
2315                 }
2316                 I915_WRITE(DP_TP_CTL(port), temp);
2317
2318         } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2319                 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2320
2321                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2322                 case DP_TRAINING_PATTERN_DISABLE:
2323                         *DP |= DP_LINK_TRAIN_OFF_CPT;
2324                         break;
2325                 case DP_TRAINING_PATTERN_1:
2326                         *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2327                         break;
2328                 case DP_TRAINING_PATTERN_2:
2329                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2330                         break;
2331                 case DP_TRAINING_PATTERN_3:
2332                         DRM_ERROR("DP training pattern 3 not supported\n");
2333                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2334                         break;
2335                 }
2336
2337         } else {
2338                 if (IS_CHERRYVIEW(dev))
2339                         *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2340                 else
2341                         *DP &= ~DP_LINK_TRAIN_MASK;
2342
2343                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2344                 case DP_TRAINING_PATTERN_DISABLE:
2345                         *DP |= DP_LINK_TRAIN_OFF;
2346                         break;
2347                 case DP_TRAINING_PATTERN_1:
2348                         *DP |= DP_LINK_TRAIN_PAT_1;
2349                         break;
2350                 case DP_TRAINING_PATTERN_2:
2351                         *DP |= DP_LINK_TRAIN_PAT_2;
2352                         break;
2353                 case DP_TRAINING_PATTERN_3:
2354                         if (IS_CHERRYVIEW(dev)) {
2355                                 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2356                         } else {
2357                                 DRM_ERROR("DP training pattern 3 not supported\n");
2358                                 *DP |= DP_LINK_TRAIN_PAT_2;
2359                         }
2360                         break;
2361                 }
2362         }
2363 }
2364
/*
 * Enable the DP port with training pattern 1 selected.  Done as two
 * register writes on purpose — see the VLV/CHV comment below.
 */
2365 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2366 {
2367         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2368         struct drm_i915_private *dev_priv = dev->dev_private;
2369
2370         /* enable with pattern 1 (as per spec) */
2371         _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2372                                  DP_TRAINING_PATTERN_1);
2373
2374         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2375         POSTING_READ(intel_dp->output_reg);
2376
2377         /*
2378          * Magic for VLV/CHV. We _must_ first set up the register
2379          * without actually enabling the port, and then do another
2380          * write to enable the port. Otherwise link training will
2381          * fail when the power sequencer is freshly used for this port.
2382          */
2383         intel_dp->DP |= DP_PORT_EN;
2384
2385         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2386         POSTING_READ(intel_dp->output_reg);
2387 }
2388
/*
 * Common DP enable path: bring up the port, power the eDP panel (with VDD
 * held across the power-on), wake the sink, run link training, and enable
 * audio if configured.  WARNs and bails if the port is already enabled.
 */
2389 static void intel_enable_dp(struct intel_encoder *encoder)
2390 {
2391         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2392         struct drm_device *dev = encoder->base.dev;
2393         struct drm_i915_private *dev_priv = dev->dev_private;
2394         struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2395         uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2396
2397         if (WARN_ON(dp_reg & DP_PORT_EN))
2398                 return;
2399
2400         pps_lock(intel_dp);
2401
             /* VLV/CHV: claim a power sequencer for this port first. */
2402         if (IS_VALLEYVIEW(dev))
2403                 vlv_init_panel_power_sequencer(intel_dp);
2404
2405         intel_dp_enable_port(intel_dp);
2406
2407         edp_panel_vdd_on(intel_dp);
2408         edp_panel_on(intel_dp);
2409         edp_panel_vdd_off(intel_dp, true);
2410
2411         pps_unlock(intel_dp);
2412
2413         if (IS_VALLEYVIEW(dev))
2414                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2415
2416         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2417         intel_dp_start_link_train(intel_dp);
2418         intel_dp_complete_link_train(intel_dp);
2419         intel_dp_stop_link_train(intel_dp);
2420
2421         if (crtc->config->has_audio) {
2422                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2423                                  pipe_name(crtc->pipe));
2424                 intel_audio_codec_enable(encoder);
2425         }
2426 }
2427
2428 static void g4x_enable_dp(struct intel_encoder *encoder)
2429 {
2430         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2431
2432         intel_enable_dp(encoder);
2433         intel_edp_backlight_on(intel_dp);
2434 }
2435
2436 static void vlv_enable_dp(struct intel_encoder *encoder)
2437 {
2438         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2439
2440         intel_edp_backlight_on(intel_dp);
2441         intel_psr_enable(intel_dp);
2442 }
2443
2444 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2445 {
2446         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2447         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2448
2449         intel_dp_prepare(encoder);
2450
2451         /* Only ilk+ has port A */
2452         if (dport->port == PORT_A) {
2453                 ironlake_set_pll_cpu_edp(intel_dp);
2454                 ironlake_edp_pll_on(intel_dp);
2455         }
2456 }
2457
/*
 * Logically disconnect this eDP port from the power sequencer it is
 * currently using: sync off any pending vdd, clear the PPS port
 * select, and mark the port as having no power sequencer. Callers
 * hold pps_mutex (asserted in vlv_steal_power_sequencer() and
 * vlv_init_panel_power_sequencer()).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* Turn off vdd while we still own this power sequencer. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2483
/*
 * Detach the power sequencer on @pipe from whichever eDP encoder is
 * currently using it, so it can be reassigned to another port.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have power sequencers on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP encoders can own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an active port indicates a driver bug. */
		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2520
/*
 * Bind the power sequencer of the current crtc's pipe to this eDP
 * port: detach any sequencer this port previously used, steal the
 * target pipe's sequencer from any other port, then (re)program the
 * PPS registers. Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only eDP ports use a power sequencer. */
	if (!is_edp(intel_dp))
		return;

	/* Nothing to do if we already own the right sequencer. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2561
/*
 * VLV pre-enable: program the PCS clock-channel registers via the
 * DPIO sideband, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/*
	 * NOTE(review): the value read here is discarded by the
	 * "val = 0" on the next line, which also makes the
	 * "val &= ~(1<<21)" branch below a no-op. Presumably only
	 * the final write matters -- confirm whether this sideband
	 * read can be dropped before cleaning it up.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2590
/*
 * VLV pre-PLL-enable: program the port registers and reset the Tx
 * lanes to their default state via the DPIO sideband.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2620
/*
 * CHV pre-enable: take the Tx lanes out of reset and program lane
 * latency/upar settings via the DPIO sideband, then run the common DP
 * enable sequence. The reset-release register sequence is
 * order-sensitive.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit (cleared for lane 1 only) */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit (cleared for lane 1 only) */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2682
/*
 * CHV pre-PLL-enable: program the port registers, then set up the
 * left/right clock buffer distribution and clock channel usage for
 * this port via the DPIO sideband.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2748
2749 /*
2750  * Native read with retry for link status and receiver capability reads for
2751  * cases where the sink may still be asleep.
2752  *
2753  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2754  * supposed to retry 3 times per the spec.
2755  */
2756 static ssize_t
2757 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2758                         void *buffer, size_t size)
2759 {
2760         ssize_t ret;
2761         int i;
2762
2763         /*
2764          * Sometime we just get the same incorrect byte repeated
2765          * over the entire buffer. Doing just one throw away read
2766          * initially seems to "solve" it.
2767          */
2768         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2769
2770         for (i = 0; i < 3; i++) {
2771                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2772                 if (ret == size)
2773                         return ret;
2774                 msleep(1);
2775         }
2776
2777         return ret;
2778 }
2779
2780 /*
2781  * Fetch AUX CH registers 0x202 - 0x207 which contain
2782  * link status information
2783  */
2784 static bool
2785 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2786 {
2787         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2788                                        DP_LANE0_1_STATUS,
2789                                        link_status,
2790                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2791 }
2792
2793 /* These are source-specific values. */
2794 static uint8_t
2795 intel_dp_voltage_max(struct intel_dp *intel_dp)
2796 {
2797         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2798         struct drm_i915_private *dev_priv = dev->dev_private;
2799         enum port port = dp_to_dig_port(intel_dp)->port;
2800
2801         if (INTEL_INFO(dev)->gen >= 9) {
2802                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2803                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2804                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2805         } else if (IS_VALLEYVIEW(dev))
2806                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2807         else if (IS_GEN7(dev) && port == PORT_A)
2808                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2809         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2810                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2811         else
2812                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2813 }
2814
/*
 * Maximum pre-emphasis level the source supports at the given voltage
 * swing. Source-specific, like intel_dp_voltage_max(); the platform
 * check order matters.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2882
/*
 * Translate the requested pre-emphasis/voltage-swing combination from
 * train_set[0] into VLV PHY register values and program them via the
 * DPIO sideband. Unsupported combinations return 0 without touching
 * the hardware. The numeric register values are hardware magic; do
 * not "simplify" them.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Latch the new values: disable Tx, program, then re-enable. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2982
/*
 * Translate the requested pre-emphasis/voltage-swing combination from
 * train_set[0] into CHV PHY de-emphasis/margin values and program the
 * swing calculation via the DPIO sideband. Unsupported combinations
 * return 0 without touching the hardware. The register write sequence
 * (clear calc init -> program -> start calc) is order-critical.
 */
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->dpio_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Max swing (level 3 vswing, level 0 pre-emph) needs uniq trans scale. */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
3156
3157 static void
3158 intel_get_adjust_train(struct intel_dp *intel_dp,
3159                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3160 {
3161         uint8_t v = 0;
3162         uint8_t p = 0;
3163         int lane;
3164         uint8_t voltage_max;
3165         uint8_t preemph_max;
3166
3167         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3168                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3169                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3170
3171                 if (this_v > v)
3172                         v = this_v;
3173                 if (this_p > p)
3174                         p = this_p;
3175         }
3176
3177         voltage_max = intel_dp_voltage_max(intel_dp);
3178         if (v >= voltage_max)
3179                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3180
3181         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3182         if (p >= preemph_max)
3183                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3184
3185         for (lane = 0; lane < 4; lane++)
3186                 intel_dp->train_set[lane] = v | p;
3187 }
3188
3189 static uint32_t
3190 intel_gen4_signal_levels(uint8_t train_set)
3191 {
3192         uint32_t        signal_levels = 0;
3193
3194         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3195         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3196         default:
3197                 signal_levels |= DP_VOLTAGE_0_4;
3198                 break;
3199         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3200                 signal_levels |= DP_VOLTAGE_0_6;
3201                 break;
3202         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3203                 signal_levels |= DP_VOLTAGE_0_8;
3204                 break;
3205         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3206                 signal_levels |= DP_VOLTAGE_1_2;
3207                 break;
3208         }
3209         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3210         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3211         default:
3212                 signal_levels |= DP_PRE_EMPHASIS_0;
3213                 break;
3214         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3215                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3216                 break;
3217         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3218                 signal_levels |= DP_PRE_EMPHASIS_6;
3219                 break;
3220         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3221                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3222                 break;
3223         }
3224         return signal_levels;
3225 }
3226
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	/*
	 * The combined swing+pre-emphasis request selects one hardware level;
	 * several distinct requests intentionally map to the same register value.
	 */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported combination: log it and fall back to the lowest level. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3254
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	/* The combined swing+pre-emphasis request selects one IVB eDP level. */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/*
		 * Unsupported combination: note the 500 mV fallback is a level
		 * not produced by any of the cases above.
		 */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3285
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	/*
	 * Map the combined swing+pre-emphasis request to a DDI buffer
	 * translation table index (DDI_BUF_TRANS_SELECT value).
	 */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(0);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(1);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DDI_BUF_TRANS_SELECT(2);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
		return DDI_BUF_TRANS_SELECT(3);

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(4);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(5);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DDI_BUF_TRANS_SELECT(6);

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(7);
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DDI_BUF_TRANS_SELECT(8);

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DDI_BUF_TRANS_SELECT(9);
	default:
		/* Unsupported combination: log it and fall back to entry 0. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_TRANS_SELECT(0);
	}
}
3322
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* Lane 0's level is representative; all lanes are programmed alike. */
	uint8_t train_set = intel_dp->train_set[0];

	/*
	 * Pick the platform-specific encoding. NOTE(review): the branch order
	 * appears deliberate — IS_CHERRYVIEW is tested before IS_VALLEYVIEW,
	 * presumably because a CHV device could also satisfy the VLV check;
	 * confirm before reordering.
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/* CHV/VLV program levels via DPIO; no bits in the DP register. */
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* Replace only the level bits; the caller writes *DP to the register. */
	*DP = (*DP & ~mask) | signal_levels;
}
3357
/*
 * Program a link-training pattern on both ends of the link: first the source
 * (port register), then the sink (DP_TRAINING_PATTERN_SET and, unless
 * disabling, DP_TRAINING_LANEx_SET in a single AUX write).
 *
 * Returns true if the full DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* One byte for DP_TRAINING_PATTERN_SET plus one per possible lane. */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Translate the DPCD pattern into the source's register bits in *DP. */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3390
/*
 * Reset the training set to swing/pre-emphasis level 0 and (re)start
 * training with @dp_train_pat; used when beginning clock recovery from
 * scratch.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3399
/*
 * Apply the training levels requested by the sink in @link_status to both
 * the source (port register) and the sink (DP_TRAINING_LANEx_SET).
 *
 * Returns true if all lane-set bytes were written to the sink.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Recompute train_set from the sink's adjustment requests. */
	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	/* Program the source first ... */
	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	/* ... then tell the sink which levels we are now driving. */
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3420
/*
 * Switch a DDI port to transmitting idle patterns and, except on PORT_A,
 * wait for the hardware to report that idle transmission has begun.
 * No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3451
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Gen9+ sinks with an explicit rate table also get a rate index. */
	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/*
	 * Clock-recovery loop. Give up after 5 full restarts (all lanes at
	 * max swing) or 5 consecutive iterations at the same voltage.
	 */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* Every lane is maxed out: restart from level 0. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Remember the final port register value for later phases. */
	intel_dp->DP = DP;
}
3545
/*
 * Second phase of link training: channel equalization with training
 * pattern 2 (or pattern 3 for HBR2 / TPS3-capable sinks). If the link
 * loses clock lock, clock recovery is redone; up to 5 such restarts are
 * attempted before aborting.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			/* Clock lock lost: redo pattern 1, then resume EQ. */
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3625
/* End link training: tell source and sink to stop sending training patterns. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3631
/*
 * Take the DP link down on the legacy (non-DDI) register path: switch the
 * port to the idle training pattern, apply the IBX transcoder-select
 * workaround if needed, then disable audio and the port itself.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms must not use this path. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do (and the sequence below assumes an enabled port). */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First switch the link to idle, using the platform's pattern field. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	/* Finally disable audio output and the port. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
3681
/*
 * Read and cache the sink's DPCD receiver capability block, then derive
 * driver state from it: PSR support (eDP only), TPS3 capability, the eDP 1.4
 * explicit link-rate table, and downstream port info when present.
 *
 * Returns false if the AUX transfer fails or no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 supported_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				supported_rates,
				sizeof(supported_rates));

		/* The table is zero-terminated; entries are little-endian. */
		for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
			int val = le16_to_cpu(supported_rates[i]);

			if (val == 0)
				break;

			/* DPCD encodes link rates in units of 200 kHz. */
			intel_dp->supported_rates[i] = val * 200;
		}
		intel_dp->num_supported_rates = i;
	}
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3757
3758 static void
3759 intel_dp_probe_oui(struct intel_dp *intel_dp)
3760 {
3761         u8 buf[3];
3762
3763         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3764                 return;
3765
3766         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3767                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3768                               buf[0], buf[1], buf[2]);
3769
3770         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3771                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3772                               buf[0], buf[1], buf[2]);
3773 }
3774
3775 static bool
3776 intel_dp_probe_mst(struct intel_dp *intel_dp)
3777 {
3778         u8 buf[1];
3779
3780         if (!intel_dp->can_mst)
3781                 return false;
3782
3783         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3784                 return false;
3785
3786         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3787                 if (buf[0] & DP_MST_CAP) {
3788                         DRM_DEBUG_KMS("Sink is MST capable\n");
3789                         intel_dp->is_mst = true;
3790                 } else {
3791                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3792                         intel_dp->is_mst = false;
3793                 }
3794         }
3795
3796         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3797         return intel_dp->is_mst;
3798 }
3799
/*
 * Ask the sink to compute a CRC over received frames and read the result
 * into @crc (6 bytes). Used for display validation/testing.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, -ETIMEDOUT
 * if the sink never produces a new CRC, or -EIO on AUX failures.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;

	/* Sink CRC is optional; check the capability bit first. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* Start CRC calculation, preserving the other DP_TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0)
		return -EIO;

	/* Snapshot the CRC counter so we can detect a fresh result below. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Wait up to 6 vblanks for the sink's CRC counter to change. */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		return -ETIMEDOUT;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EIO;

	/* Stop CRC calculation, again preserving unrelated bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0)
		return -EIO;

	return 0;
}
3850
3851 static bool
3852 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3853 {
3854         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3855                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3856                                        sink_irq_vector, 1) == 1;
3857 }
3858
3859 static bool
3860 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3861 {
3862         int ret;
3863
3864         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3865                                              DP_SINK_COUNT_ESI,
3866                                              sink_irq_vector, 14);
3867         if (ret != 14)
3868                 return false;
3869
3870         return true;
3871 }
3872
/*
 * Respond to a DP automated test request from the sink.  Automated
 * test handling is not implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3879
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain
 * the link if channel EQ was lost, hand the event to the MST manager,
 * and ack/re-read until the sink stops reporting events.
 *
 * Returns the MST manager's result, 0 if nothing needed handling, or
 * -EINVAL if we are not in MST mode or the ESI read failed (in which
 * case MST mode is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write a few times in case the aux
				 * transaction fails. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile;
				 * loop until the ESI read fails or nothing
				 * was handled. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3936
3937 /*
3938  * According to DP spec
3939  * 5.1.2:
3940  *  1. Read DPCD
3941  *  2. Configure link according to Receiver Capabilities
3942  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3943  *  4. Check link status on receipt of hot-plug interrupt
3944  */
3945 static void
3946 intel_dp_check_link_status(struct intel_dp *intel_dp)
3947 {
3948         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3949         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3950         u8 sink_irq_vector;
3951         u8 link_status[DP_LINK_STATUS_SIZE];
3952
3953         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3954
3955         if (!intel_encoder->connectors_active)
3956                 return;
3957
3958         if (WARN_ON(!intel_encoder->base.crtc))
3959                 return;
3960
3961         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3962                 return;
3963
3964         /* Try to read receiver status if the link appears to be up */
3965         if (!intel_dp_get_link_status(intel_dp, link_status)) {
3966                 return;
3967         }
3968
3969         /* Now read the DPCD to see if it's actually running */
3970         if (!intel_dp_get_dpcd(intel_dp)) {
3971                 return;
3972         }
3973
3974         /* Try to read the source of the interrupt */
3975         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3976             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3977                 /* Clear interrupt source */
3978                 drm_dp_dpcd_writeb(&intel_dp->aux,
3979                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
3980                                    sink_irq_vector);
3981
3982                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3983                         intel_dp_handle_test_request(intel_dp);
3984                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3985                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3986         }
3987
3988         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3989                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3990                               intel_encoder->base.name);
3991                 intel_dp_start_link_train(intel_dp);
3992                 intel_dp_complete_link_train(intel_dp);
3993                 intel_dp_stop_link_train(intel_dp);
3994         }
3995 }
3996
3997 /* XXX this is probably wrong for multiple downstream ports */
3998 static enum drm_connector_status
3999 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4000 {
4001         uint8_t *dpcd = intel_dp->dpcd;
4002         uint8_t type;
4003
4004         if (!intel_dp_get_dpcd(intel_dp))
4005                 return connector_status_disconnected;
4006
4007         /* if there's no downstream port, we're done */
4008         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4009                 return connector_status_connected;
4010
4011         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4012         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4013             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4014                 uint8_t reg;
4015
4016                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4017                                             &reg, 1) < 0)
4018                         return connector_status_unknown;
4019
4020                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4021                                               : connector_status_disconnected;
4022         }
4023
4024         /* If no HPD, poke DDC gently */
4025         if (drm_probe_ddc(&intel_dp->aux.ddc))
4026                 return connector_status_connected;
4027
4028         /* Well we tried, say unknown for unreliable port types */
4029         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4030                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4031                 if (type == DP_DS_PORT_TYPE_VGA ||
4032                     type == DP_DS_PORT_TYPE_NON_EDID)
4033                         return connector_status_unknown;
4034         } else {
4035                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4036                         DP_DWN_STRM_PORT_TYPE_MASK;
4037                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4038                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4039                         return connector_status_unknown;
4040         }
4041
4042         /* Anything else is out of spec, warn and ignore */
4043         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4044         return connector_status_disconnected;
4045 }
4046
4047 static enum drm_connector_status
4048 edp_detect(struct intel_dp *intel_dp)
4049 {
4050         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4051         enum drm_connector_status status;
4052
4053         status = intel_panel_detect(dev);
4054         if (status == connector_status_unknown)
4055                 status = connector_status_connected;
4056
4057         return status;
4058 }
4059
4060 static enum drm_connector_status
4061 ironlake_dp_detect(struct intel_dp *intel_dp)
4062 {
4063         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4064         struct drm_i915_private *dev_priv = dev->dev_private;
4065         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4066
4067         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4068                 return connector_status_disconnected;
4069
4070         return intel_dp_detect_dpcd(intel_dp);
4071 }
4072
4073 static int g4x_digital_port_connected(struct drm_device *dev,
4074                                        struct intel_digital_port *intel_dig_port)
4075 {
4076         struct drm_i915_private *dev_priv = dev->dev_private;
4077         uint32_t bit;
4078
4079         if (IS_VALLEYVIEW(dev)) {
4080                 switch (intel_dig_port->port) {
4081                 case PORT_B:
4082                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4083                         break;
4084                 case PORT_C:
4085                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4086                         break;
4087                 case PORT_D:
4088                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4089                         break;
4090                 default:
4091                         return -EINVAL;
4092                 }
4093         } else {
4094                 switch (intel_dig_port->port) {
4095                 case PORT_B:
4096                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4097                         break;
4098                 case PORT_C:
4099                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4100                         break;
4101                 case PORT_D:
4102                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4103                         break;
4104                 default:
4105                         return -EINVAL;
4106                 }
4107         }
4108
4109         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4110                 return 0;
4111         return 1;
4112 }
4113
4114 static enum drm_connector_status
4115 g4x_dp_detect(struct intel_dp *intel_dp)
4116 {
4117         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4118         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4119         int ret;
4120
4121         /* Can't disconnect eDP, but you can close the lid... */
4122         if (is_edp(intel_dp)) {
4123                 enum drm_connector_status status;
4124
4125                 status = intel_panel_detect(dev);
4126                 if (status == connector_status_unknown)
4127                         status = connector_status_connected;
4128                 return status;
4129         }
4130
4131         ret = g4x_digital_port_connected(dev, intel_dig_port);
4132         if (ret == -EINVAL)
4133                 return connector_status_unknown;
4134         else if (ret == 0)
4135                 return connector_status_disconnected;
4136
4137         return intel_dp_detect_dpcd(intel_dp);
4138 }
4139
4140 static struct edid *
4141 intel_dp_get_edid(struct intel_dp *intel_dp)
4142 {
4143         struct intel_connector *intel_connector = intel_dp->attached_connector;
4144
4145         /* use cached edid if we have one */
4146         if (intel_connector->edid) {
4147                 /* invalid edid */
4148                 if (IS_ERR(intel_connector->edid))
4149                         return NULL;
4150
4151                 return drm_edid_duplicate(intel_connector->edid);
4152         } else
4153                 return drm_get_edid(&intel_connector->base,
4154                                     &intel_dp->aux.ddc);
4155 }
4156
4157 static void
4158 intel_dp_set_edid(struct intel_dp *intel_dp)
4159 {
4160         struct intel_connector *intel_connector = intel_dp->attached_connector;
4161         struct edid *edid;
4162
4163         edid = intel_dp_get_edid(intel_dp);
4164         intel_connector->detect_edid = edid;
4165
4166         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4167                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4168         else
4169                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4170 }
4171
4172 static void
4173 intel_dp_unset_edid(struct intel_dp *intel_dp)
4174 {
4175         struct intel_connector *intel_connector = intel_dp->attached_connector;
4176
4177         kfree(intel_connector->detect_edid);
4178         intel_connector->detect_edid = NULL;
4179
4180         intel_dp->has_audio = false;
4181 }
4182
4183 static enum intel_display_power_domain
4184 intel_dp_power_get(struct intel_dp *dp)
4185 {
4186         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4187         enum intel_display_power_domain power_domain;
4188
4189         power_domain = intel_display_port_power_domain(encoder);
4190         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4191
4192         return power_domain;
4193 }
4194
4195 static void
4196 intel_dp_power_put(struct intel_dp *dp,
4197                    enum intel_display_power_domain power_domain)
4198 {
4199         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4200         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4201 }
4202
/*
 * Connector ->detect hook.
 *
 * MST ports report disconnected here (the code comment below explains
 * why).  Otherwise the port is probed with the platform-specific
 * helper while the port's power domain is held; on success the sink
 * OUI is read, MST is probed, and the EDID is cached for later
 * ->get_modes calls.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any EDID cached by a previous detect */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	/* Always balance the power reference, even on early exit */
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4259
4260 static void
4261 intel_dp_force(struct drm_connector *connector)
4262 {
4263         struct intel_dp *intel_dp = intel_attached_dp(connector);
4264         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4265         enum intel_display_power_domain power_domain;
4266
4267         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4268                       connector->base.id, connector->name);
4269         intel_dp_unset_edid(intel_dp);
4270
4271         if (connector->status != connector_status_connected)
4272                 return;
4273
4274         power_domain = intel_dp_power_get(intel_dp);
4275
4276         intel_dp_set_edid(intel_dp);
4277
4278         intel_dp_power_put(intel_dp, power_domain);
4279
4280         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4281                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4282 }
4283
4284 static int intel_dp_get_modes(struct drm_connector *connector)
4285 {
4286         struct intel_connector *intel_connector = to_intel_connector(connector);
4287         struct edid *edid;
4288
4289         edid = intel_connector->detect_edid;
4290         if (edid) {
4291                 int ret = intel_connector_update_modes(connector, edid);
4292                 if (ret)
4293                         return ret;
4294         }
4295
4296         /* if eDP has no EDID, fall back to fixed mode */
4297         if (is_edp(intel_attached_dp(connector)) &&
4298             intel_connector->panel.fixed_mode) {
4299                 struct drm_display_mode *mode;
4300
4301                 mode = drm_mode_duplicate(connector->dev,
4302                                           intel_connector->panel.fixed_mode);
4303                 if (mode) {
4304                         drm_mode_probed_add(connector, mode);
4305                         return 1;
4306                 }
4307         }
4308
4309         return 0;
4310 }
4311
4312 static bool
4313 intel_dp_detect_audio(struct drm_connector *connector)
4314 {
4315         bool has_audio = false;
4316         struct edid *edid;
4317
4318         edid = to_intel_connector(connector)->detect_edid;
4319         if (edid)
4320                 has_audio = drm_detect_monitor_audio(edid);
4321
4322         return has_audio;
4323 }
4324
/*
 * Connector ->set_property hook.  Handles force-audio, broadcast RGB
 * range, and (eDP only) the panel scaling mode.  Any accepted change
 * restores the mode on the attached crtc so it takes effect.
 *
 * Returns 0 on success or no-op, -EINVAL for unknown properties or
 * invalid values, or the error from drm_object_property_set_value().
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change: skip the modeset */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the change by restoring the mode on the active crtc */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4412
/*
 * Connector ->destroy hook: free the cached EDIDs, tear down the
 * panel state for eDP, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may be an ERR_PTR marker for an invalid EDID, not just NULL */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4431
/*
 * Encoder ->destroy hook: unregister the aux channel, clean up MST
 * state, force panel VDD off for eDP (including cancelling the
 * delayed vdd-off work), and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4457
/*
 * Encoder suspend hook: for eDP, make sure panel VDD is really off
 * before suspending (the delayed vdd-off work may still be pending).
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4474
/*
 * If the BIOS left panel VDD enabled, take over the tracking: grab
 * the matching power domain reference and schedule the normal delayed
 * vdd off.  Called with pps_mutex held.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4499
/*
 * Encoder ->reset hook: re-sync eDP power sequencer state with
 * whatever the BIOS left behind (VLV PPS pipe assignment, VDD state).
 * Non-eDP encoders have nothing to sanitize.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4522
/* DRM connector callbacks for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
4533
/* Probe-helper callbacks (mode list / validation / encoder lookup). */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4539
/* DRM encoder callbacks for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4544
/*
 * Intentionally empty: DP hot-plug work is done from
 * intel_dp_hpd_pulse() instead of this legacy hook.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	return;
}
4550
/*
 * Handle a hot-plug detect pulse on a DP digital port.
 *
 * Long pulses re-probe the port (live status, DPCD, OUI, MST); short
 * pulses service MST interrupts or re-check link status for SST.  If
 * an MST device disappears, MST mode is torn down (mst_fail path).
 * The port's power domain is held for the duration.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Check the port's live status with the platform helper */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	/* Always balance the power reference, success or failure */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4634
4635 /* Return which DP Port should be selected for Transcoder DP control */
4636 int
4637 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4638 {
4639         struct drm_device *dev = crtc->dev;
4640         struct intel_encoder *intel_encoder;
4641         struct intel_dp *intel_dp;
4642
4643         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4644                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4645
4646                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4647                     intel_encoder->type == INTEL_OUTPUT_EDP)
4648                         return intel_dp->output_reg;
4649         }
4650
4651         return -1;
4652 }
4653
4654 /* check the VBT to see whether the eDP is on DP-D port */
4655 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4656 {
4657         struct drm_i915_private *dev_priv = dev->dev_private;
4658         union child_device_config *p_child;
4659         int i;
4660         static const short port_mapping[] = {
4661                 [PORT_B] = PORT_IDPB,
4662                 [PORT_C] = PORT_IDPC,
4663                 [PORT_D] = PORT_IDPD,
4664         };
4665
4666         if (port == PORT_A)
4667                 return true;
4668
4669         if (!dev_priv->vbt.child_dev_num)
4670                 return false;
4671
4672         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4673                 p_child = dev_priv->vbt.child_dev + i;
4674
4675                 if (p_child->common.dvo_port == port_mapping[port] &&
4676                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4677                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4678                         return true;
4679         }
4680         return false;
4681 }
4682
4683 void
4684 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4685 {
4686         struct intel_connector *intel_connector = to_intel_connector(connector);
4687
4688         intel_attach_force_audio_property(connector);
4689         intel_attach_broadcast_rgb_property(connector);
4690         intel_dp->color_range_auto = true;
4691
4692         if (is_edp(intel_dp)) {
4693                 drm_mode_create_scaling_mode_property(connector->dev);
4694                 drm_object_attach_property(
4695                         &connector->base,
4696                         connector->dev->mode_config.scaling_mode_property,
4697                         DRM_MODE_SCALE_ASPECT);
4698                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4699         }
4700 }
4701
/*
 * Seed the panel power sequencing timestamps with the current time so
 * the delay bookkeeping starts from a known reference at init.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
4708
/*
 * Compute the eDP panel power sequencing delays for @intel_dp and cache
 * them in intel_dp->pps_delays.
 *
 * For each delay, the maximum of the current hardware register value and
 * the VBT value is used; if both are zero, the eDP 1.3 spec limit is the
 * fallback. A no-op once t11_t12 has been initialized.
 *
 * Must be called with dev_priv->pps_mutex held (lockdep-asserted below).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PPS register set: fixed on PCH platforms, per-pipe on VLV/CHV. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* The power-cycle delay register field is in 100ms units; scale to
	 * the common 100us units used for all the other fields. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hardware units to ms for the sw delay fields. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4811
/*
 * Program the previously computed panel power sequencing delays
 * (intel_dp->pps_delays) into the PPS hardware registers, and select
 * which port the power sequencer controls where the hardware supports
 * it (per-port on VLV, DP-A vs DP-D on IBX/CPT).
 *
 * Must be called with dev_priv->pps_mutex held (lockdep-asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PPS register set: fixed on PCH platforms, per-pipe otherwise. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* t11_t12 is kept in 100us units; the register field wants 100ms. */
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
4877
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = encoder->new_crtc;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	/* NOTE(review): config is assigned here but not used below — candidate
	 * for removal in a follow-up. */
	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* A request matching the panel downclock rate selects the low-RR state. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		/* Gen8+ (except CHV): switch RR by selecting M1/N1 or M2/N2. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7/VLV: toggle the eDP RR-mode-switch bit in PIPECONF. */
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
4981
4982 /**
4983  * intel_edp_drrs_enable - init drrs struct if supported
4984  * @intel_dp: DP struct
4985  *
4986  * Initializes frontbuffer_bits and drrs.dp
4987  */
4988 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4989 {
4990         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4991         struct drm_i915_private *dev_priv = dev->dev_private;
4992         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4993         struct drm_crtc *crtc = dig_port->base.base.crtc;
4994         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4995
4996         if (!intel_crtc->config->has_drrs) {
4997                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
4998                 return;
4999         }
5000
5001         mutex_lock(&dev_priv->drrs.mutex);
5002         if (WARN_ON(dev_priv->drrs.dp)) {
5003                 DRM_ERROR("DRRS already enabled\n");
5004                 goto unlock;
5005         }
5006
5007         dev_priv->drrs.busy_frontbuffer_bits = 0;
5008
5009         dev_priv->drrs.dp = intel_dp;
5010
5011 unlock:
5012         mutex_unlock(&dev_priv->drrs.mutex);
5013 }
5014
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the panel's fixed (high) refresh rate if we were in the
 * low-RR state, detaches drrs.dp, and cancels any pending downclock
 * work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Switch back to the fixed mode rate before tearing DRRS down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Cancel outside the mutex: the work item itself takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5047
/*
 * Delayed work that drops the panel to its downclocked refresh rate
 * once the screen has been idle; scheduled from intel_edp_drrs_flush()
 * with a 1 second delay.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:

	mutex_unlock(&dev_priv->drrs.mutex);
}
5078
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	/* Unlocked peek: if DRRS was never enabled there is nothing to do.
	 * NOTE(review): this check races with enable/disable — confirm the
	 * callers tolerate that. */
	if (!dev_priv->drrs.dp)
		return;

	/* Cancel before taking the mutex: the work item takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Screen activity: force the panel back to its high refresh rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only track dirty bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5117
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	/* Unlocked peek: nothing to do if DRRS was never enabled. */
	if (!dev_priv->drrs.dp)
		return;

	/* Cancel before taking the mutex: the work item takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* Once no frontbuffers are busy, arm the 1s downclock timer.
	 * NOTE(review): pipe is computed but unused here — candidate for
	 * removal in a follow-up. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
			!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5152
5153 /**
5154  * DOC: Display Refresh Rate Switching (DRRS)
5155  *
5156  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5158  * dynamically, based on the usage scenario. This feature is applicable
5159  * for internal panels.
5160  *
5161  * Indication that the panel supports DRRS is given by the panel EDID, which
5162  * would list multiple refresh rates for one resolution.
5163  *
5164  * DRRS is of 2 types - static and seamless.
5165  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5166  * (may appear as a blink on screen) and is used in dock-undock scenario.
5167  * Seamless DRRS involves changing RR without any visual effect to the user
5168  * and can be used during normal system usage. This is done by programming
5169  * certain registers.
5170  *
5171  * Support for static/seamless DRRS may be indicated in the VBT based on
5172  * inputs from the panel spec.
5173  *
5174  * DRRS saves power by switching to low RR based on usage scenarios.
5175  *
5176  * eDP DRRS:-
5177  *        The implementation is based on frontbuffer tracking implementation.
5178  * When there is a disturbance on the screen triggered by user activity or a
5179  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5180  * When there is no movement on screen, after a timeout of 1 second, a switch
5181  * to low RR is made.
5182  *        For integration with frontbuffer tracking code,
5183  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5184  *
5185  * DRRS can be further extended to support other internal panels and also
5186  * the scenario of video playback wherein RR is set based on the rate
5187  * requested by userspace.
5188  */
5189
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* Look for a lower-refresh variant of fixed_mode in the probed modes. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);

	mutex_init(&dev_priv->drrs.mutex);

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Start out at the panel's native (high) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5240
/*
 * Finish connector setup for eDP: sanitize VDD state, cache the DPCD
 * and EDID, program the panel power sequencer registers, pick the fixed
 * (and optional DRRS downclock) panel mode, and set up the backlight.
 *
 * Returns true on success (and trivially for non-eDP connectors);
 * returns false if the DPCD read fails, in which case the panel is
 * presumed to be a ghost and eDP is disabled.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	dev_priv->drrs.type = DRRS_NOT_SUPPORTED;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but yielded no modes: record as invalid. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5349
5350 bool
5351 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5352                         struct intel_connector *intel_connector)
5353 {
5354         struct drm_connector *connector = &intel_connector->base;
5355         struct intel_dp *intel_dp = &intel_dig_port->dp;
5356         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5357         struct drm_device *dev = intel_encoder->base.dev;
5358         struct drm_i915_private *dev_priv = dev->dev_private;
5359         enum port port = intel_dig_port->port;
5360         int type;
5361
5362         intel_dp->pps_pipe = INVALID_PIPE;
5363
5364         /* intel_dp vfuncs */
5365         if (INTEL_INFO(dev)->gen >= 9)
5366                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5367         else if (IS_VALLEYVIEW(dev))
5368                 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5369         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5370                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5371         else if (HAS_PCH_SPLIT(dev))
5372                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5373         else
5374                 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5375
5376         if (INTEL_INFO(dev)->gen >= 9)
5377                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5378         else
5379                 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5380
5381         /* Preserve the current hw state. */
5382         intel_dp->DP = I915_READ(intel_dp->output_reg);
5383         intel_dp->attached_connector = intel_connector;
5384
5385         if (intel_dp_is_edp(dev, port))
5386                 type = DRM_MODE_CONNECTOR_eDP;
5387         else
5388                 type = DRM_MODE_CONNECTOR_DisplayPort;
5389
5390         /*
5391          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5392          * for DP the encoder type can be set by the caller to
5393          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5394          */
5395         if (type == DRM_MODE_CONNECTOR_eDP)
5396                 intel_encoder->type = INTEL_OUTPUT_EDP;
5397
5398         /* eDP only on port B and/or C on vlv/chv */
5399         if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5400                     port != PORT_B && port != PORT_C))
5401                 return false;
5402
5403         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5404                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5405                         port_name(port));
5406
5407         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5408         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5409
5410         connector->interlace_allowed = true;
5411         connector->doublescan_allowed = 0;
5412
5413         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5414                           edp_panel_vdd_work);
5415
5416         intel_connector_attach_encoder(intel_connector, intel_encoder);
5417         drm_connector_register(connector);
5418
5419         if (HAS_DDI(dev))
5420                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5421         else
5422                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5423         intel_connector->unregister = intel_dp_connector_unregister;
5424
5425         /* Set up the hotplug pin. */
5426         switch (port) {
5427         case PORT_A:
5428                 intel_encoder->hpd_pin = HPD_PORT_A;
5429                 break;
5430         case PORT_B:
5431                 intel_encoder->hpd_pin = HPD_PORT_B;
5432                 break;
5433         case PORT_C:
5434                 intel_encoder->hpd_pin = HPD_PORT_C;
5435                 break;
5436         case PORT_D:
5437                 intel_encoder->hpd_pin = HPD_PORT_D;
5438                 break;
5439         default:
5440                 BUG();
5441         }
5442
5443         if (is_edp(intel_dp)) {
5444                 pps_lock(intel_dp);
5445                 intel_dp_init_panel_power_timestamps(intel_dp);
5446                 if (IS_VALLEYVIEW(dev))
5447                         vlv_initial_power_sequencer_setup(intel_dp);
5448                 else
5449                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
5450                 pps_unlock(intel_dp);
5451         }
5452
5453         intel_dp_aux_init(intel_dp, intel_connector);
5454
5455         /* init MST on ports that can support it */
5456         if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5457                 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5458                         intel_dp_mst_encoder_init(intel_dig_port,
5459                                                   intel_connector->base.base.id);
5460                 }
5461         }
5462
5463         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5464                 drm_dp_aux_unregister(&intel_dp->aux);
5465                 if (is_edp(intel_dp)) {
5466                         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5467                         /*
5468                          * vdd might still be enabled do to the delayed vdd off.
5469                          * Make sure vdd is actually turned off here.
5470                          */
5471                         pps_lock(intel_dp);
5472                         edp_panel_vdd_off_sync(intel_dp);
5473                         pps_unlock(intel_dp);
5474                 }
5475                 drm_connector_unregister(connector);
5476                 drm_connector_cleanup(connector);
5477                 return false;
5478         }
5479
5480         intel_dp_add_properties(intel_dp, connector);
5481
5482         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5483          * 0xd.  Failure to do so will result in spurious interrupts being
5484          * generated on the port when a cable is not attached.
5485          */
5486         if (IS_G4X(dev) && !IS_GM45(dev)) {
5487                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5488                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5489         }
5490
5491         return true;
5492 }
5493
5494 void
5495 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5496 {
5497         struct drm_i915_private *dev_priv = dev->dev_private;
5498         struct intel_digital_port *intel_dig_port;
5499         struct intel_encoder *intel_encoder;
5500         struct drm_encoder *encoder;
5501         struct intel_connector *intel_connector;
5502
5503         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5504         if (!intel_dig_port)
5505                 return;
5506
5507         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5508         if (!intel_connector) {
5509                 kfree(intel_dig_port);
5510                 return;
5511         }
5512
5513         intel_encoder = &intel_dig_port->base;
5514         encoder = &intel_encoder->base;
5515
5516         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5517                          DRM_MODE_ENCODER_TMDS);
5518
5519         intel_encoder->compute_config = intel_dp_compute_config;
5520         intel_encoder->disable = intel_disable_dp;
5521         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5522         intel_encoder->get_config = intel_dp_get_config;
5523         intel_encoder->suspend = intel_dp_encoder_suspend;
5524         if (IS_CHERRYVIEW(dev)) {
5525                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5526                 intel_encoder->pre_enable = chv_pre_enable_dp;
5527                 intel_encoder->enable = vlv_enable_dp;
5528                 intel_encoder->post_disable = chv_post_disable_dp;
5529         } else if (IS_VALLEYVIEW(dev)) {
5530                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5531                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5532                 intel_encoder->enable = vlv_enable_dp;
5533                 intel_encoder->post_disable = vlv_post_disable_dp;
5534         } else {
5535                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5536                 intel_encoder->enable = g4x_enable_dp;
5537                 if (INTEL_INFO(dev)->gen >= 5)
5538                         intel_encoder->post_disable = ilk_post_disable_dp;
5539         }
5540
5541         intel_dig_port->port = port;
5542         intel_dig_port->dp.output_reg = output_reg;
5543
5544         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5545         if (IS_CHERRYVIEW(dev)) {
5546                 if (port == PORT_D)
5547                         intel_encoder->crtc_mask = 1 << 2;
5548                 else
5549                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5550         } else {
5551                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5552         }
5553         intel_encoder->cloneable = 0;
5554         intel_encoder->hot_plug = intel_dp_hot_plug;
5555
5556         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5557         dev_priv->hpd_irq_port[port] = intel_dig_port;
5558
5559         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5560                 drm_encoder_cleanup(encoder);
5561                 kfree(intel_dig_port);
5562                 kfree(intel_connector);
5563         }
5564 }
5565
5566 void intel_dp_mst_suspend(struct drm_device *dev)
5567 {
5568         struct drm_i915_private *dev_priv = dev->dev_private;
5569         int i;
5570
5571         /* disable MST */
5572         for (i = 0; i < I915_MAX_PORTS; i++) {
5573                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5574                 if (!intel_dig_port)
5575                         continue;
5576
5577                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5578                         if (!intel_dig_port->dp.can_mst)
5579                                 continue;
5580                         if (intel_dig_port->dp.is_mst)
5581                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5582                 }
5583         }
5584 }
5585
5586 void intel_dp_mst_resume(struct drm_device *dev)
5587 {
5588         struct drm_i915_private *dev_priv = dev->dev_private;
5589         int i;
5590
5591         for (i = 0; i < I915_MAX_PORTS; i++) {
5592                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5593                 if (!intel_dig_port)
5594                         continue;
5595                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5596                         int ret;
5597
5598                         if (!intel_dig_port->dp.can_mst)
5599                                 continue;
5600
5601                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5602                         if (ret != 0) {
5603                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5604                         }
5605                 }
5606         }
5607 }