drm/i915/skl: Enabling PSR2 SU with frame sync
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
/* Maps a DP link bandwidth code to the PLL divider settings that produce it. */
struct dp_link_dpll {
        int link_bw;            /* DP_LINK_BW_* rate code (as in DPCD) */
        struct dpll dpll;       /* divider values for that link rate */
};
48
/* g4x DP PLL dividers for the two supported link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (ILK+) DP PLL dividers. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DP PLL dividers. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are listed below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional divider for m2.
         * m2 is stored in fixed-point format using the formula:
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Link rates (in kHz) supported by Skylake source hardware. */
static const int gen9_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
/* Link rates (in kHz) usable on Cherryview, incl. eDP 1.4 intermediate rates. */
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
                                 243000, 270000, 324000, 405000,
                                 420000, 432000, 540000 };
/* Standard DP link rates (RBR/HBR/HBR2) used on all other platforms. */
static const int default_rates[] = { 162000, 270000, 540000 };
94
95 /**
96  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97  * @intel_dp: DP struct
98  *
99  * If a CPU or PCH DP output is attached to an eDP panel, this function
100  * will return true, and false otherwise.
101  */
102 static bool is_edp(struct intel_dp *intel_dp)
103 {
104         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107 }
108
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.base.dev;
114 }
115
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117 {
118         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
119 }
120
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
127
128 static int
129 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
130 {
131         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133         switch (max_link_bw) {
134         case DP_LINK_BW_1_62:
135         case DP_LINK_BW_2_7:
136         case DP_LINK_BW_5_4:
137                 break;
138         default:
139                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140                      max_link_bw);
141                 max_link_bw = DP_LINK_BW_1_62;
142                 break;
143         }
144         return max_link_bw;
145 }
146
147 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148 {
149         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150         struct drm_device *dev = intel_dig_port->base.base.dev;
151         u8 source_max, sink_max;
152
153         source_max = 4;
154         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156                 source_max = 2;
157
158         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160         return min(source_max, sink_max);
161 }
162
163 /*
164  * The units on the numbers in the next two are... bizarre.  Examples will
165  * make it clearer; this one parallels an example in the eDP spec.
166  *
167  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168  *
169  *     270000 * 1 * 8 / 10 == 216000
170  *
171  * The actual data capacity of that configuration is 2.16Gbit/s, so the
172  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
173  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174  * 119000.  At 18bpp that's 2142000 kilobits per second.
175  *
176  * Thus the strange-looking division by 10 in intel_dp_link_required, to
177  * get the result in decakilobits instead of kilobits.
178  */
179
/*
 * Link bandwidth required by a mode, in decakilobits/s (see the unit
 * discussion above). @pixel_clock is in kHz, @bpp in bits per pixel.
 * The +9 makes the division round up so the link is never
 * under-provisioned.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kbps = pixel_clock * bpp;

        return (kbps + 9) / 10;
}
185
/*
 * Maximum payload the link can carry, in decakilobits/s: link symbol
 * clock times lane count, scaled by 8/10 for 8b/10b coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return max_link_clock * max_lanes * 8 / 10;
}
191
/*
 * Validate a display mode against this DP/eDP connector's link
 * bandwidth and (for eDP) the panel's fixed mode.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        /*
         * eDP panels can't exceed the fixed panel mode; the bandwidth
         * check uses the panel's own clock since that is what will
         * actually be driven on the link.
         */
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        /* 18 bpp (6 bpc) — presumably the lowest depth we can fall back to,
         * giving the most permissive bandwidth check; TODO confirm. */
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        /* Reject pixel clocks below 10 MHz. */
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        /* Double-clocked modes are not supported here. */
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}
229
/*
 * Pack up to 4 bytes of an AUX message into a 32-bit word, most
 * significant byte first, as the AUX data registers expect. Bytes
 * beyond the fourth are ignored; missing low bytes stay zero.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i = 0;

        while (i < src_bytes && i < 4) {
                v |= ((uint32_t) src[i]) << ((3 - i) * 8);
                i++;
        }

        return v;
}
241
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes, most
 * significant byte first (the inverse of intel_dp_pack_aux()).
 * At most 4 bytes are ever written to @dst.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int count = dst_bytes < 4 ? dst_bytes : 4;
        int i;

        for (i = 0; i < count; i++)
                dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
250
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        /* Decode the FSB frequency field of CLKCFG into hrawclk in MHz. */
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                /* Unknown encoding: fall back to the 533 FSB value. */
                return 133;
        }
}
284
/* Forward declarations: panel power sequencer init, defined later. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp);
291
/*
 * Acquire the panel power sequencer lock for @intel_dp.
 *
 * A power domain reference for the port is taken BEFORE grabbing
 * pps_mutex — see the comment in vlv_power_sequencer_reset() for why
 * the get/put must happen outside the mutex. Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
309
/*
 * Release pps_mutex, then drop the power domain reference taken by
 * pps_lock() — mirror order of pps_lock().
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
323
324 static void
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326 {
327         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328         struct drm_device *dev = intel_dig_port->base.base.dev;
329         struct drm_i915_private *dev_priv = dev->dev_private;
330         enum pipe pipe = intel_dp->pps_pipe;
331         bool pll_enabled;
332         uint32_t DP;
333
334         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336                  pipe_name(pipe), port_name(intel_dig_port->port)))
337                 return;
338
339         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340                       pipe_name(pipe), port_name(intel_dig_port->port));
341
342         /* Preserve the BIOS-computed detected bit. This is
343          * supposed to be read-only.
344          */
345         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347         DP |= DP_PORT_WIDTH(1);
348         DP |= DP_LINK_TRAIN_PAT_1;
349
350         if (IS_CHERRYVIEW(dev))
351                 DP |= DP_PIPE_SELECT_CHV(pipe);
352         else if (pipe == PIPE_B)
353                 DP |= DP_PIPEB_SELECT;
354
355         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357         /*
358          * The DPLL for the pipe must be enabled for this to work.
359          * So enable temporarily it if it's not already enabled.
360          */
361         if (!pll_enabled)
362                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
365         /*
366          * Similar magic as in intel_dp_enable_port().
367          * We _must_ do this port enable + disable trick
368          * to make this power seqeuencer lock onto the port.
369          * Otherwise even VDD force bit won't work.
370          */
371         I915_WRITE(intel_dp->output_reg, DP);
372         POSTING_READ(intel_dp->output_reg);
373
374         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375         POSTING_READ(intel_dp->output_reg);
376
377         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378         POSTING_READ(intel_dp->output_reg);
379
380         if (!pll_enabled)
381                 vlv_force_pll_off(dev, pipe);
382 }
383
/*
 * Return the pipe whose power sequencer drives this eDP port,
 * assigning one on first use: pick a sequencer no other eDP port has
 * claimed, steal it from any non-eDP user, program it for this port,
 * and kick it so it locks onto the port. Caller holds pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        /* Fast path: already assigned. */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
447
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

/* Is the power sequencer on @pipe currently delivering panel power? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD currently being forced on by the power sequencer on @pipe? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Always-true check: accept any pipe with a matching port select. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
468
469 static enum pipe
470 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471                      enum port port,
472                      vlv_pipe_check pipe_check)
473 {
474         enum pipe pipe;
475
476         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478                         PANEL_PORT_SELECT_MASK;
479
480                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481                         continue;
482
483                 if (!pipe_check(dev_priv, pipe))
484                         continue;
485
486                 return pipe;
487         }
488
489         return INVALID_PIPE;
490 }
491
/*
 * At init time, discover which power sequencer (if any) was left
 * associated with this eDP port, preferring one actively powering the
 * panel, then one holding VDD, then any with a matching port select.
 * When nothing matches, defer to lazy assignment in
 * vlv_power_sequencer_pipe(). Caller holds pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
528
/*
 * Forget all eDP power-sequencer-to-pipe assignments so the next user
 * picks and reprograms one from scratch. VLV-only; WARNs otherwise.
 * NOTE(review): callers appear to invoke this when PPS hardware state
 * may have been lost — confirm against call sites.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
557
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559 {
560         struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562         if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_CONTROL;
564         else
565                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569 {
570         struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572         if (HAS_PCH_SPLIT(dev))
573                 return PCH_PP_STATUS;
574         else
575                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576 }
577
/* Reboot notifier: shut down panel power so the panel's T12
   (power-cycle) timing is honored across a restart. Only applicable
   when panel PM state is not otherwise tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only act on restart, and only for eDP ports. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Block the reboot until the panel's power-cycle delay elapses. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
613
/*
 * Is panel power currently on, per the PP status register? On VLV a
 * port with no power sequencer assigned cannot have panel power.
 * Caller holds pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
627
/*
 * Is VDD currently forced on, per the PP control register? On VLV a
 * port with no power sequencer assigned cannot have VDD forced on.
 * Caller holds pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
641
/*
 * Sanity check before AUX transfers on eDP: warn when neither panel
 * power nor forced VDD is up, since the transfer will fail without
 * either. No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}
658
/*
 * Wait for the in-flight AUX transfer to complete and return the final
 * AUX_CH_CTL status. With AUX interrupts we sleep on gmbus_wait_queue
 * (up to 10ms); otherwise we poll atomically for up to 10ms. The HW is
 * expected to flag its own timeout well within that window, so hitting
 * our timeout is reported as an error.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* NOTE: C re-reads the register each evaluation and updates 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
682
683 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
684 {
685         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686         struct drm_device *dev = intel_dig_port->base.base.dev;
687
688         /*
689          * The clock divider is based off the hrawclk, and would like to run at
690          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
691          */
692         return index ? 0 : intel_hrawclk(dev) / 2;
693 }
694
695 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696 {
697         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698         struct drm_device *dev = intel_dig_port->base.base.dev;
699         struct drm_i915_private *dev_priv = dev->dev_private;
700
701         if (index)
702                 return 0;
703
704         if (intel_dig_port->port == PORT_A) {
705                 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
706         } else {
707                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
708         }
709 }
710
/*
 * AUX clock divider for HSW/BDW. Port A (eDP) derives AUX from the
 * display clock; non-ULT HSW with an LPT PCH gets two fixed divider
 * values to try in turn (63, then 72) as a workaround; all other
 * ports use the PCH rawclk.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else  {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}
732
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 100;
}
737
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug-in into the existing code.
         */
        if (index)
                return 0;

        return 1;
}
747
/*
 * Build the AUX_CH_CTL value to start a @send_bytes transfer on
 * pre-SKL hardware. Precharge count (the 2us-unit field per its name)
 * is 3 on gen6, 5 on later gens; BDW eDP (port A) needs the longer
 * 600us HW timeout, everyone else uses 400us.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
777
/*
 * Build the AUX_CH_CTL value for SKL+: no clock divider field (HW
 * derives the AUX clock itself), fixed 1600us timeout and a 32-pulse
 * sync sequence. @unused keeps the signature shared with the other
 * get_aux_send_ctl vfuncs.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
792
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * then read back up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never went idle or the transfer never completed,
 * -E2BIG for oversized buffers, -EIO on a receive error, -ETIMEDOUT when
 * the sink did not reply (typically: nothing connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at each AUX clock divider the platform
	 * hook reports, until one of them completes successfully. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
932
933 #define BARE_ADDRESS_SIZE       3
934 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: translate a drm_dp_aux_msg into the raw byte
 * stream intel_dp_aux_ch() expects and decode the reply.
 *
 * Returns the payload size on success (for writes: the number of bytes
 * the sink accepted), or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request nibble, 20-bit address,
	 * and length-minus-one. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-size message is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* Reply code is in the top nibble of the first byte. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* +1 for the reply byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1003
/*
 * Set up the AUX channel control register for this port and register the
 * DP AUX bus, including a sysfs link from the connector to its DDC
 * adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* Default to the HSW/BDW register layout: port A on the CPU,
	 * ports B-D on the PCH.  Other platforms are fixed up below. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG(); /* DP is only ever registered on ports A-D */
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC adapter under the connector's sysfs directory;
	 * unregister the AUX bus again if that fails. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1068
/*
 * Connector unregister hook: remove the DDC sysfs link created by
 * intel_dp_aux_init() (skipped for MST ports, which don't own the link),
 * then do the common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1079
1080 static void
1081 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1082 {
1083         u32 ctrl1;
1084
1085         pipe_config->ddi_pll_sel = SKL_DPLL0;
1086         pipe_config->dpll_hw_state.cfgcr1 = 0;
1087         pipe_config->dpll_hw_state.cfgcr2 = 0;
1088
1089         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1090         switch (link_clock / 2) {
1091         case 81000:
1092                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1093                                               SKL_DPLL0);
1094                 break;
1095         case 135000:
1096                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1097                                               SKL_DPLL0);
1098                 break;
1099         case 270000:
1100                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1101                                               SKL_DPLL0);
1102                 break;
1103         case 162000:
1104                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1105                                               SKL_DPLL0);
1106                 break;
1107         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1108         results in CDCLK change. Need to handle the change of CDCLK by
1109         disabling pipes and re-enabling them */
1110         case 108000:
1111                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1112                                               SKL_DPLL0);
1113                 break;
1114         case 216000:
1115                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1116                                               SKL_DPLL0);
1117                 break;
1118
1119         }
1120         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1121 }
1122
1123 static void
1124 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1125 {
1126         switch (link_bw) {
1127         case DP_LINK_BW_1_62:
1128                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1129                 break;
1130         case DP_LINK_BW_2_7:
1131                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1132                 break;
1133         case DP_LINK_BW_5_4:
1134                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1135                 break;
1136         }
1137 }
1138
1139 static int
1140 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1141 {
1142         if (intel_dp->num_sink_rates) {
1143                 *sink_rates = intel_dp->sink_rates;
1144                 return intel_dp->num_sink_rates;
1145         }
1146
1147         *sink_rates = default_rates;
1148
1149         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1150 }
1151
/*
 * Return the platform's supported DP link rates in *source_rates and
 * the number of usable entries.  For pre-gen9/non-CHV platforms the
 * table is default_rates and the count is derived from the platform's
 * maximum link BW code (BW code >> 3 gives the last usable index).
 */
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/*
	 * NOTE(review): Skylake is gen9, so it takes the gen9_rates branch
	 * above and this WaDisableHBR2 limit looks unreachable — confirm
	 * whether the workaround should instead be applied to gen9_rates.
	 */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
1174
1175 static void
1176 intel_dp_set_clock(struct intel_encoder *encoder,
1177                    struct intel_crtc_state *pipe_config, int link_bw)
1178 {
1179         struct drm_device *dev = encoder->base.dev;
1180         const struct dp_link_dpll *divisor = NULL;
1181         int i, count = 0;
1182
1183         if (IS_G4X(dev)) {
1184                 divisor = gen4_dpll;
1185                 count = ARRAY_SIZE(gen4_dpll);
1186         } else if (HAS_PCH_SPLIT(dev)) {
1187                 divisor = pch_dpll;
1188                 count = ARRAY_SIZE(pch_dpll);
1189         } else if (IS_CHERRYVIEW(dev)) {
1190                 divisor = chv_dpll;
1191                 count = ARRAY_SIZE(chv_dpll);
1192         } else if (IS_VALLEYVIEW(dev)) {
1193                 divisor = vlv_dpll;
1194                 count = ARRAY_SIZE(vlv_dpll);
1195         }
1196
1197         if (divisor && count) {
1198                 for (i = 0; i < count; i++) {
1199                         if (link_bw == divisor[i].link_bw) {
1200                                 pipe_config->dpll = divisor[i].dpll;
1201                                 pipe_config->clock_set = true;
1202                                 break;
1203                         }
1204                 }
1205         }
1206 }
1207
1208 static int intersect_rates(const int *source_rates, int source_len,
1209                            const int *sink_rates, int sink_len,
1210                            int *common_rates)
1211 {
1212         int i = 0, j = 0, k = 0;
1213
1214         while (i < source_len && j < sink_len) {
1215                 if (source_rates[i] == sink_rates[j]) {
1216                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1217                                 return k;
1218                         common_rates[k] = source_rates[i];
1219                         ++k;
1220                         ++i;
1221                         ++j;
1222                 } else if (source_rates[i] < sink_rates[j]) {
1223                         ++i;
1224                 } else {
1225                         ++j;
1226                 }
1227         }
1228         return k;
1229 }
1230
/*
 * Compute the link rates supported by both the platform and the sink.
 * Fills @common_rates (ascending) and returns the count.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int nsrc, nsnk;

	nsnk = intel_dp_sink_rates(intel_dp, &snk_rates);
	nsrc = intel_dp_source_rates(dev, &src_rates);

	return intersect_rates(src_rates, nsrc, snk_rates, nsnk,
			       common_rates);
}
1245
/*
 * Render up to @nelem integers from @array into @str as a comma-
 * terminated list ("a,b,c,").  Stops early once the buffer would
 * overflow (snprintf truncates the last partial entry).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	size_t pos = 0;
	int idx;

	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		int written = snprintf(str + pos, len - pos, "%d,",
				       array[idx]);

		if ((size_t)written >= len - pos)
			return;

		pos += written;
	}
}
1261
1262 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1263 {
1264         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1265         const int *source_rates, *sink_rates;
1266         int source_len, sink_len, common_len;
1267         int common_rates[DP_MAX_SUPPORTED_RATES];
1268         char str[128]; /* FIXME: too big for stack? */
1269
1270         if ((drm_debug & DRM_UT_KMS) == 0)
1271                 return;
1272
1273         source_len = intel_dp_source_rates(dev, &source_rates);
1274         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1275         DRM_DEBUG_KMS("source rates: %s\n", str);
1276
1277         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1278         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1279         DRM_DEBUG_KMS("sink rates: %s\n", str);
1280
1281         common_len = intel_dp_common_rates(intel_dp, common_rates);
1282         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1283         DRM_DEBUG_KMS("common rates: %s\n", str);
1284 }
1285
1286 static int rate_to_index(int find, const int *rates)
1287 {
1288         int i = 0;
1289
1290         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1291                 if (find == rates[i])
1292                         break;
1293
1294         return i;
1295 }
1296
1297 int
1298 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1299 {
1300         int rates[DP_MAX_SUPPORTED_RATES] = {};
1301         int len;
1302
1303         len = intel_dp_common_rates(intel_dp, rates);
1304         if (WARN_ON(len <= 0))
1305                 return 162000;
1306
1307         return rates[rate_to_index(0, rates) - 1];
1308 }
1309
/*
 * Return the index of @rate within the sink's own rate table
 * (intel_dp->sink_rates).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1314
/*
 * Encoder compute_config hook: pick the DP link parameters (lane count,
 * link rate, pipe bpp) and fill @pipe_config for the requested mode.
 *
 * The search walks bpp downwards from the requested value in 2-bpc
 * steps and, for each bpp, scans link rates (lowest first) and lane
 * counts (1/2/4) until the mode's data rate fits the link capacity.
 * eDP panels skip the search and are driven at their maximum advertised
 * clock and lane count.  Returns false when no combination can carry
 * the mode, or for double-clocked modes.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	/* eDP: replace the requested mode with the panel's fixed mode and
	 * configure panel fitting to scale to it. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	/* Sinks with an explicit rate table get the rate by index
	 * (rate_select); others get the classic link BW code. */
	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Precompute a second m/n set for seamless DRRS downclocking. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection for the chosen link rate. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1473
/*
 * Program the CPU eDP (port A) PLL frequency bits in DP_A to match the
 * crtc's configured port clock: the 160MHz setting for a 162000 kHz
 * link, 270MHz otherwise.  Mirrors the choice into intel_dp->DP.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Delay after the frequency change — presumably PLL settling time;
	 * TODO confirm against the hardware docs. */
	udelay(500);
}
1504
/*
 * Compute the DP port register value (intel_dp->DP) for the current crtc
 * configuration.  The bit layout differs per platform generation; see the
 * register-kind breakdown below.  Only intel_dp->DP is updated here — the
 * value is written to the hardware later.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *      IBX PCH
	 *      SNB CPU
	 *      IVB CPU
	 *      CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU port A: CPT-style training bits, pipe select at bit 29. */
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU layout: sync polarity, color range and pipe
		 * select live in this register. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a third pipe, hence a wider pipe-select field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT PCH: everything else is in TRANS_DP_CTL. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1579
1580 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1581 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1582
1583 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1584 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1585
1586 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1587 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1588
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error if it hasn't happened within 5 seconds.  Caller must
 * hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* Note: timeout is reported but not propagated to the caller. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1615
/* Wait for the sequencer to report "panel on and idle". */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1621
/* Wait for the sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1627
/*
 * Wait out the panel power-cycle delay before powering the panel back
 * on: first the software-tracked remainder since the last power cycle,
 * then whatever the hardware sequencer still reports as pending.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1639
/* Honour the panel-power-on -> backlight-on delay (T8 equivalent). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1645
/* Honour the backlight-off -> panel-power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1651
/*
 * Read the current pp_control value, unlocking the register if it
 * is locked.
 *
 * Returns the PP_CONTROL contents with the write-protect key field
 * replaced by the unlock value, so a subsequent write-back is always
 * accepted by the hardware.  Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}
1669
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on (EDP_FORCE_VDD) so AUX transactions can be made
 * without a full panel power-up.  Returns true when VDD was not
 * already requested, i.e. the caller owes a matching vdd-off.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* A deferred vdd-off may be pending; we're keeping VDD on now. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Already forced on by a previous caller: nothing more to do. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Reference dropped again in edp_panel_vdd_off_sync(). */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1727
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested, since at this level each on must pair with exactly one off.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1749
/*
 * Synchronously drop the VDD force (clear EDP_FORCE_VDD) and release
 * the power domain reference taken in edp_panel_vdd_on().  Must only
 * run once no one wants VDD anymore (want_panel_vdd is false) and with
 * pps_mutex held.  No-op when VDD is already off.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with the panel off counts as a power cycle. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1790
/*
 * Delayed-work callback scheduled by edp_panel_vdd_schedule_off():
 * turns VDD off unless someone requested it again in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1801
/* Queue a deferred VDD off instead of dropping it immediately. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the
	 * power cycle delay: 5x panel_power_cycle_delay) to keep the panel
	 * power up across a sequence of operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1814
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Clears the VDD request; with @sync the force is dropped immediately,
 * otherwise a delayed worker drops it later (keeping VDD up across a
 * burst of AUX accesses).
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1840
/*
 * Run the full eDP panel power-on sequence: wait out the power-cycle
 * delay, set POWER_TARGET_ON, and wait until the sequencer reports the
 * panel on.  Caller must hold pps_mutex.  No-op for non-eDP; warns and
 * bails if the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1888
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1898
1899
/*
 * Run the eDP panel power-off sequence: clear panel power, reset and
 * VDD force in a single write, wait for the sequencer to report off,
 * then drop the power domain reference taken when VDD was enabled.
 * Caller must hold pps_mutex and must have VDD forced on (warned on).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the power-cycle delay (see wait_panel_power_cycle()). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1941
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1951
/*
 * Enable backlight in the panel power control (EDP_BLC_ENABLE).
 * Waits out the panel-on -> backlight-on delay first; takes pps_mutex
 * itself, so callers must NOT hold it.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
1981
/*
 * Enable backlight PWM and backlight PP control.
 * Order matters: PWM first, then the panel power control BLC enable.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
1993
/*
 * Disable backlight in the panel power control (clear EDP_BLC_ENABLE),
 * then wait out the backlight-off delay.  Takes pps_mutex itself, so
 * callers must NOT hold it.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Timestamp for the backlight-off delay (see edp_wait_backlight_off()). */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2020
/*
 * Disable backlight PP control and backlight PWM.
 * Reverse order of intel_edp_backlight_on(): BLC disable first, PWM last.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2032
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 *
 * Reads the current BLC state from hardware first so repeated requests
 * for the already-current state are no-ops.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
2058
/*
 * Enable the eDP PLL on the DP_A (CPU eDP) port.  The pipe must be
 * disabled, and both the PLL and the port must currently be off
 * (warned on).  A 200us settle delay follows the enable.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2084
/*
 * Disable the eDP PLL on the DP_A port.  The pipe and port must be
 * off, and the PLL must currently be on (warned on).  Operates on the
 * hardware register value rather than intel_dp->DP -- see comment
 * below.  A 200us settle delay follows the disable.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2109
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, retry the write up to 3 times, sleeping
		 * 1 ms between attempts, to give the sink time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			/* drm_dp_dpcd_writeb() returns 1 on success */
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	/* Best effort: a sink that never wakes only costs a debug message. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2140
/*
 * Read out whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it.  Pipe decoding differs per platform: IVB port A
 * and CHV use dedicated field encodings, pre-CPT (and port A) use the
 * classic pipe select bit, and CPT must search the transcoder DP
 * control registers for the port selection.  Returns false when the
 * power domain or the port is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: find the transcoder that has this port selected. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(dev_priv, i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		/* Port enabled but no transcoder claims it: *pipe untouched. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
2199
/*
 * Read back the current DP configuration from hardware into
 * @pipe_config: audio enable, sync polarities (from the port register
 * or, on CPT, the transcoder DP control register), limited color
 * range, link m/n values, port clock (for port A from the PLL
 * frequency select) and the derived dotclock.  Also sanitizes the VBT
 * eDP bpp against what the BIOS actually programmed.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
		pipe_config->has_audio = true;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Sync polarity lives in the port register itself. */
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* CPT: sync polarity comes from the transcoder DP control. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP: port clock is selected by the eDP PLL frequency. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2283
/*
 * Encoder disable hook: stop audio and (non-DDI) PSR, then take the
 * panel down in strict order -- VDD forced on, backlight off, sink to
 * D3, panel power off.  On gen < 5 the port itself must also be
 * disabled before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2307
/*
 * ILK post-disable: take the link down after the pipe is off, and for
 * CPU eDP (port A) also shut down the eDP PLL.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2317
/* VLV post-disable: just take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2324
/*
 * CHV post-disable: take the link down, then put the PHY data lanes
 * of both PCS groups back into reset via DPIO (request soft reset,
 * then clear the lane reset bits), under dpio_lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->dpio_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2360
/*
 * Translate a DPCD training pattern (@dp_train_pat, including the
 * scrambling-disable bit) into the platform's port register encoding.
 * DDI platforms program DP_TP_CTL directly; CPT and pre-CPT/CHV
 * platforms update the caller's *@DP shadow value, which the caller
 * must write to the port register itself.  Pattern 3 is only supported
 * on DDI and CHV; elsewhere it falls back to pattern 2 with an error.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2443
/*
 * Enable the DP port with training pattern 1 programmed, using two
 * register writes: the first configures the port without DP_PORT_EN,
 * the second sets DP_PORT_EN (required on VLV/CHV, see below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2467
/*
 * Common DP enable sequence: bind the power sequencer (VLV), enable the
 * port, power up the eDP panel, wake the sink, and run link training.
 * Audio is enabled last if the crtc config requests it.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be off when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV must have the power sequencer attached to this port first */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* eDP panel power on; VDD is only held across the panel-on step */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

	/* Wake the sink, then train the link */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2506
/* G4x enable hook: full DP enable, then turn on the eDP backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
2514
/*
 * VLV enable hook: the port itself is enabled from the pre_enable hook,
 * so only the backlight and PSR remain to be turned on here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
2522
/*
 * G4x/ILK pre-enable hook: program the port register and, for the CPU
 * eDP port A, set up and turn on the eDP PLL.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2536
/*
 * Logically disconnect the power sequencer currently assigned to this
 * port: sync VDD off first, clear the sequencer's port-select register,
 * and mark the pps_pipe as invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* make sure VDD is really off before we let go of the sequencer */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2562
/*
 * Take the power sequencer of @pipe away from whichever eDP port is
 * currently using it, so the caller can assign it to a new port.
 * Must be called with pps_mutex held.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* only pipes A and B have power sequencers on VLV/CHV */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP ports own power sequencers */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* stealing from an active port indicates a driver bug */
		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2599
/*
 * Bind the power sequencer of the crtc's pipe to this eDP port,
 * detaching any sequencer this port was using and stealing the target
 * sequencer from any other port first. Must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* power sequencers are only relevant for eDP */
	if (!is_edp(intel_dp))
		return;

	/* already using the right sequencer; nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2640
/*
 * VLV pre-enable hook: program per-pipe PCS DPIO registers for this
 * lane channel, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/*
	 * NOTE(review): the read result is discarded immediately below
	 * (val = 0); presumably the read only acts as a posting read —
	 * confirm against DPIO documentation.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	/* bit 21 selects the pipe; remaining bits are fixed magic */
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2669
/*
 * VLV pre-PLL-enable hook: program the port register, reset the Tx
 * lanes to defaults, and apply the inter-pair skew workaround.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2699
/*
 * CHV pre-enable hook: release the data lanes from soft reset, program
 * per-lane latency/upar settings, then run the common DP enable.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit (lane 1 gets 0, others 0x6) */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit (lane 1 gets 0, others 1) */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2761
/*
 * CHV pre-PLL-enable hook: program the port register, then set up the
 * left/right clock buffer distribution and the clock channel usage for
 * the PCS and common lane blocks.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2827
2828 /*
2829  * Native read with retry for link status and receiver capability reads for
2830  * cases where the sink may still be asleep.
2831  *
2832  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2833  * supposed to retry 3 times per the spec.
2834  */
2835 static ssize_t
2836 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2837                         void *buffer, size_t size)
2838 {
2839         ssize_t ret;
2840         int i;
2841
2842         /*
2843          * Sometime we just get the same incorrect byte repeated
2844          * over the entire buffer. Doing just one throw away read
2845          * initially seems to "solve" it.
2846          */
2847         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2848
2849         for (i = 0; i < 3; i++) {
2850                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2851                 if (ret == size)
2852                         return ret;
2853                 msleep(1);
2854         }
2855
2856         return ret;
2857 }
2858
2859 /*
2860  * Fetch AUX CH registers 0x202 - 0x207 which contain
2861  * link status information
2862  */
2863 static bool
2864 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2865 {
2866         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2867                                        DP_LANE0_1_STATUS,
2868                                        link_status,
2869                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2870 }
2871
2872 /* These are source-specific values. */
2873 static uint8_t
2874 intel_dp_voltage_max(struct intel_dp *intel_dp)
2875 {
2876         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2877         struct drm_i915_private *dev_priv = dev->dev_private;
2878         enum port port = dp_to_dig_port(intel_dp)->port;
2879
2880         if (INTEL_INFO(dev)->gen >= 9) {
2881                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2882                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2883                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2884         } else if (IS_VALLEYVIEW(dev))
2885                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2886         else if (IS_GEN7(dev) && port == PORT_A)
2887                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2888         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2889                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2890         else
2891                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2892 }
2893
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, per platform. Generally: the higher the swing,
 * the lower the maximum pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 CPU eDP (port A) tops out at level 2 */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2961
/*
 * Program VLV DPIO Tx swing/pre-emphasis registers for the current
 * train_set[0] value. Register values are hardware-specific magic for
 * each (pre-emphasis, voltage-swing) combination; unsupported
 * combinations return 0 without touching the hardware.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* disable Tx (DW5=0), program values, then re-enable (DW5 bit 31) */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
3061
3062 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3063 {
3064         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3065         struct drm_i915_private *dev_priv = dev->dev_private;
3066         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3067         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3068         u32 deemph_reg_value, margin_reg_value, val;
3069         uint8_t train_set = intel_dp->train_set[0];
3070         enum dpio_channel ch = vlv_dport_to_channel(dport);
3071         enum pipe pipe = intel_crtc->pipe;
3072         int i;
3073
3074         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3075         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3076                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3077                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3078                         deemph_reg_value = 128;
3079                         margin_reg_value = 52;
3080                         break;
3081                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3082                         deemph_reg_value = 128;
3083                         margin_reg_value = 77;
3084                         break;
3085                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3086                         deemph_reg_value = 128;
3087                         margin_reg_value = 102;
3088                         break;
3089                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3090                         deemph_reg_value = 128;
3091                         margin_reg_value = 154;
3092                         /* FIXME extra to set for 1200 */
3093                         break;
3094                 default:
3095                         return 0;
3096                 }
3097                 break;
3098         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3099                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3100                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3101                         deemph_reg_value = 85;
3102                         margin_reg_value = 78;
3103                         break;
3104                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3105                         deemph_reg_value = 85;
3106                         margin_reg_value = 116;
3107                         break;
3108                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3109                         deemph_reg_value = 85;
3110                         margin_reg_value = 154;
3111                         break;
3112                 default:
3113                         return 0;
3114                 }
3115                 break;
3116         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3117                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3118                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3119                         deemph_reg_value = 64;
3120                         margin_reg_value = 104;
3121                         break;
3122                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3123                         deemph_reg_value = 64;
3124                         margin_reg_value = 154;
3125                         break;
3126                 default:
3127                         return 0;
3128                 }
3129                 break;
3130         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3131                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3132                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3133                         deemph_reg_value = 43;
3134                         margin_reg_value = 154;
3135                         break;
3136                 default:
3137                         return 0;
3138                 }
3139                 break;
3140         default:
3141                 return 0;
3142         }
3143
3144         mutex_lock(&dev_priv->dpio_lock);
3145
3146         /* Clear calc init */
3147         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3148         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3149         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3150         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3151         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3152
3153         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3154         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3155         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3156         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3157         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3158
3159         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3160         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3161         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3162         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3163
3164         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3165         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3166         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3167         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3168
3169         /* Program swing deemph */
3170         for (i = 0; i < 4; i++) {
3171                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3172                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3173                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3174                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3175         }
3176
3177         /* Program swing margin */
3178         for (i = 0; i < 4; i++) {
3179                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3180                 val &= ~DPIO_SWING_MARGIN000_MASK;
3181                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3182                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3183         }
3184
3185         /* Disable unique transition scale */
3186         for (i = 0; i < 4; i++) {
3187                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3188                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3189                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3190         }
3191
3192         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3193                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3194                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3195                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3196
3197                 /*
3198                  * The document said it needs to set bit 27 for ch0 and bit 26
3199                  * for ch1. Might be a typo in the doc.
3200                  * For now, for this unique transition scale selection, set bit
3201                  * 27 for ch0 and ch1.
3202                  */
3203                 for (i = 0; i < 4; i++) {
3204                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3205                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3206                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3207                 }
3208
3209                 for (i = 0; i < 4; i++) {
3210                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3211                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3212                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3213                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3214                 }
3215         }
3216
3217         /* Start swing calculation */
3218         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3219         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3221
3222         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3223         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3224         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3225
3226         /* LRC Bypass */
3227         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3228         val |= DPIO_LRC_BYPASS;
3229         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3230
3231         mutex_unlock(&dev_priv->dpio_lock);
3232
3233         return 0;
3234 }
3235
3236 static void
3237 intel_get_adjust_train(struct intel_dp *intel_dp,
3238                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3239 {
3240         uint8_t v = 0;
3241         uint8_t p = 0;
3242         int lane;
3243         uint8_t voltage_max;
3244         uint8_t preemph_max;
3245
3246         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3247                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3248                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3249
3250                 if (this_v > v)
3251                         v = this_v;
3252                 if (this_p > p)
3253                         p = this_p;
3254         }
3255
3256         voltage_max = intel_dp_voltage_max(intel_dp);
3257         if (v >= voltage_max)
3258                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3259
3260         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3261         if (p >= preemph_max)
3262                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3263
3264         for (lane = 0; lane < 4; lane++)
3265                 intel_dp->train_set[lane] = v | p;
3266 }
3267
3268 static uint32_t
3269 intel_gen4_signal_levels(uint8_t train_set)
3270 {
3271         uint32_t        signal_levels = 0;
3272
3273         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3274         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3275         default:
3276                 signal_levels |= DP_VOLTAGE_0_4;
3277                 break;
3278         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3279                 signal_levels |= DP_VOLTAGE_0_6;
3280                 break;
3281         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3282                 signal_levels |= DP_VOLTAGE_0_8;
3283                 break;
3284         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3285                 signal_levels |= DP_VOLTAGE_1_2;
3286                 break;
3287         }
3288         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3289         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3290         default:
3291                 signal_levels |= DP_PRE_EMPHASIS_0;
3292                 break;
3293         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3294                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3295                 break;
3296         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3297                 signal_levels |= DP_PRE_EMPHASIS_6;
3298                 break;
3299         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3300                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3301                 break;
3302         }
3303         return signal_levels;
3304 }
3305
3306 /* Gen6's DP voltage swing and pre-emphasis control */
3307 static uint32_t
3308 intel_gen6_edp_signal_levels(uint8_t train_set)
3309 {
3310         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3311                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3312         switch (signal_levels) {
3313         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3314         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3315                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3316         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3317                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3318         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3319         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3320                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3321         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3322         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3323                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3324         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3325         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3326                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3327         default:
3328                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3329                               "0x%x\n", signal_levels);
3330                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3331         }
3332 }
3333
3334 /* Gen7's DP voltage swing and pre-emphasis control */
3335 static uint32_t
3336 intel_gen7_edp_signal_levels(uint8_t train_set)
3337 {
3338         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3339                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3340         switch (signal_levels) {
3341         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3342                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3343         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3344                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3345         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3346                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3347
3348         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3349                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3350         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3351                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3352
3353         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3354                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3355         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3356                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3357
3358         default:
3359                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3360                               "0x%x\n", signal_levels);
3361                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3362         }
3363 }
3364
3365 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3366 static uint32_t
3367 intel_hsw_signal_levels(uint8_t train_set)
3368 {
3369         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3370                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3371         switch (signal_levels) {
3372         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3373                 return DDI_BUF_TRANS_SELECT(0);
3374         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3375                 return DDI_BUF_TRANS_SELECT(1);
3376         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3377                 return DDI_BUF_TRANS_SELECT(2);
3378         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3379                 return DDI_BUF_TRANS_SELECT(3);
3380
3381         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3382                 return DDI_BUF_TRANS_SELECT(4);
3383         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3384                 return DDI_BUF_TRANS_SELECT(5);
3385         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3386                 return DDI_BUF_TRANS_SELECT(6);
3387
3388         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3389                 return DDI_BUF_TRANS_SELECT(7);
3390         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3391                 return DDI_BUF_TRANS_SELECT(8);
3392
3393         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3394                 return DDI_BUF_TRANS_SELECT(9);
3395         default:
3396                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3397                               "0x%x\n", signal_levels);
3398                 return DDI_BUF_TRANS_SELECT(0);
3399         }
3400 }
3401
/* Properly updates "DP" with the correct signal levels.
 *
 * Translates the sink-requested voltage swing / pre-emphasis
 * (train_set[0]) into the platform-specific port-register encoding and
 * merges it into *DP under the matching field mask. On CHV/VLV the
 * levels are programmed through sideband writes inside the helper, so
 * no register bits are merged (mask = 0).
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/* NOTE(review): CHV is checked before VLV — presumably CHV
		 * devices also satisfy IS_VALLEYVIEW; confirm before reordering. */
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* PORT_A is the eDP port on these platforms */
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
3436
3437 static bool
3438 intel_dp_set_link_train(struct intel_dp *intel_dp,
3439                         uint32_t *DP,
3440                         uint8_t dp_train_pat)
3441 {
3442         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3443         struct drm_device *dev = intel_dig_port->base.base.dev;
3444         struct drm_i915_private *dev_priv = dev->dev_private;
3445         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3446         int ret, len;
3447
3448         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3449
3450         I915_WRITE(intel_dp->output_reg, *DP);
3451         POSTING_READ(intel_dp->output_reg);
3452
3453         buf[0] = dp_train_pat;
3454         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3455             DP_TRAINING_PATTERN_DISABLE) {
3456                 /* don't write DP_TRAINING_LANEx_SET on disable */
3457                 len = 1;
3458         } else {
3459                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3460                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3461                 len = intel_dp->lane_count + 1;
3462         }
3463
3464         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3465                                 buf, len);
3466
3467         return ret == len;
3468 }
3469
/*
 * Restart training from scratch: zero the training set (level 0 swing
 * and pre-emphasis), program the matching signal levels into *DP, and
 * kick off the requested training pattern. Returns the result of the
 * pattern write.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3478
3479 static bool
3480 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3481                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3482 {
3483         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3484         struct drm_device *dev = intel_dig_port->base.base.dev;
3485         struct drm_i915_private *dev_priv = dev->dev_private;
3486         int ret;
3487
3488         intel_get_adjust_train(intel_dp, link_status);
3489         intel_dp_set_signal_levels(intel_dp, DP);
3490
3491         I915_WRITE(intel_dp->output_reg, *DP);
3492         POSTING_READ(intel_dp->output_reg);
3493
3494         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3495                                 intel_dp->train_set, intel_dp->lane_count);
3496
3497         return ret == intel_dp->lane_count;
3498 }
3499
/*
 * Switch a DDI port's DP transport to idle-pattern transmission after
 * training completes, and (except on PORT_A) wait for the hardware to
 * confirm the idle pattern was sent. No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms is plenty for the idle pattern to go out; see DP_TP_STATUS. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3530
/* Enable corresponding port and start training pattern 1
 *
 * Writes the link configuration (bandwidth/rate, lane count, 8b/10b
 * coding) to the sink, enables the port, and runs the clock-recovery
 * phase of DP link training: repeatedly read link status and apply the
 * sink's swing/pre-emphasis adjust requests until clock recovery
 * succeeds or the retry budget is exhausted. The final port-register
 * value is saved back into intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks with a rate table additionally select via DP_LINK_RATE_SET */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff never matches a masked swing value, so the first pass
	 * through the loop always resets voltage_tries. */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing: restart from zero, at most
			 * 5 full restarts before giving up. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3624
/*
 * Channel-equalization phase of DP link training (pattern 2, or
 * pattern 3 for HBR2 / TPS3-capable sinks). Loops applying the sink's
 * adjust requests; if clock recovery is lost or equalization stalls,
 * it falls back to a full clock-recovery restart, up to 5 times.
 * Leaves the link transmitting the idle pattern on DDI platforms.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after 5 full clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			/* Redo clock recovery, then restore our pattern. */
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3704
/* End link training by programming the "pattern disable" training
 * pattern on both the source and the sink. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3710
/*
 * Take a non-DDI DP port down: idle the link training pattern, apply
 * the IBX transcoder-select workaround if needed, then disable audio
 * and the port itself, waiting out the panel power-down delay.
 * DDI platforms are handled elsewhere (hence the WARN).
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already off. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the link into the idle training pattern; CPT-era PCH ports
	 * use a different mask/encoding than the original register layout. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
3760
/*
 * Read and cache the sink's DPCD receiver capability block, then probe
 * optional capabilities: PSR/PSR2 (eDP only), training pattern 3,
 * eDP 1.4 intermediate link rates, and downstream port info.
 * Returns false if the initial AUX read fails, no DPCD is present, or
 * the downstream-port status fetch fails; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is gen9+ only and additionally requires the sink's
		 * AUX frame sync capability. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The table is zero-terminated; entries are presumably in
		 * 200 kHz units (hence the * 200) — confirm against spec. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			intel_dp->sink_rates[i] = val * 200;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3854
3855 static void
3856 intel_dp_probe_oui(struct intel_dp *intel_dp)
3857 {
3858         u8 buf[3];
3859
3860         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3861                 return;
3862
3863         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3864                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3865                               buf[0], buf[1], buf[2]);
3866
3867         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3868                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3869                               buf[0], buf[1], buf[2]);
3870 }
3871
3872 static bool
3873 intel_dp_probe_mst(struct intel_dp *intel_dp)
3874 {
3875         u8 buf[1];
3876
3877         if (!intel_dp->can_mst)
3878                 return false;
3879
3880         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3881                 return false;
3882
3883         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3884                 if (buf[0] & DP_MST_CAP) {
3885                         DRM_DEBUG_KMS("Sink is MST capable\n");
3886                         intel_dp->is_mst = true;
3887                 } else {
3888                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3889                         intel_dp->is_mst = false;
3890                 }
3891         }
3892
3893         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3894         return intel_dp->is_mst;
3895 }
3896
/*
 * intel_dp_sink_crc - read the sink-side frame CRC over AUX
 * @intel_dp: DP encoder to query
 * @crc: output buffer, receives 6 bytes starting at DP_TEST_CRC_R_CR
 *
 * Starts the sink's test-CRC engine, waits up to 6 vblanks for the sink's
 * frame-CRC count to advance, reads the CRC registers and stops the engine.
 *
 * Returns 0 on success, -EIO on an AUX transfer failure, -ENOTTY if the
 * sink does not support CRC calculation, -ETIMEDOUT if the CRC count
 * never changed.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;

	/* Bail early if the sink doesn't advertise TEST_CRC support */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Set TEST_SINK_START via read-modify-write so the other
	 * TEST_SINK bits are preserved. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0)
		return -EIO;

	/* Snapshot the current frame-CRC count... */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* ...and wait for it to advance, polling once per vblank */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		return -ETIMEDOUT;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EIO;

	/* Stop the CRC engine again, preserving the other TEST_SINK bits */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0)
		return -EIO;

	return 0;
}
3947
3948 static bool
3949 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3950 {
3951         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3952                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3953                                        sink_irq_vector, 1) == 1;
3954 }
3955
3956 static bool
3957 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3958 {
3959         int ret;
3960
3961         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3962                                              DP_SINK_COUNT_ESI,
3963                                              sink_irq_vector, 14);
3964         if (ret != 14)
3965                 return false;
3966
3967         return true;
3968 }
3969
/*
 * Respond to a DP automated test request signalled via the sink IRQ
 * vector. Automated test handling is not implemented, so every request
 * is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3976
/*
 * Service a short HPD pulse while in MST mode: read the sink's ESI block,
 * retrain the link if channel EQ was lost, and hand the event bits to the
 * MST topology manager. Handled events are acked back to the sink and the
 * ESI is re-read (go_again) until nothing remains pending.
 *
 * Returns the topology manager's result for the last batch of events, or
 * -EINVAL when not in MST mode or when the initial ESI read failed (in
 * which case MST is torn down and a hotplug event is generated).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the handled events; retry the AUX write
				 * a few times to ride out transient failures */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4033
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called with the connection_mutex held; services sink IRQs and retrains
 * the link if channel equalization was lost.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Only bother when the encoder is actually driving an active pipe */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Step 4 above: retrain if channel EQ is no longer ok */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4093
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD: for a native sink a successful
 * DPCD read means connected; for branch devices, consult SINK_COUNT (when
 * HPD-aware), then fall back to a DDC probe, then to per-port-type
 * heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* Non-zero SINK_COUNT means a sink hangs off the branch */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port type info is available */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type exists */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4143
4144 static enum drm_connector_status
4145 edp_detect(struct intel_dp *intel_dp)
4146 {
4147         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4148         enum drm_connector_status status;
4149
4150         status = intel_panel_detect(dev);
4151         if (status == connector_status_unknown)
4152                 status = connector_status_connected;
4153
4154         return status;
4155 }
4156
4157 static enum drm_connector_status
4158 ironlake_dp_detect(struct intel_dp *intel_dp)
4159 {
4160         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4161         struct drm_i915_private *dev_priv = dev->dev_private;
4162         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4163
4164         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4165                 return connector_status_disconnected;
4166
4167         return intel_dp_detect_dpcd(intel_dp);
4168 }
4169
4170 static int g4x_digital_port_connected(struct drm_device *dev,
4171                                        struct intel_digital_port *intel_dig_port)
4172 {
4173         struct drm_i915_private *dev_priv = dev->dev_private;
4174         uint32_t bit;
4175
4176         if (IS_VALLEYVIEW(dev)) {
4177                 switch (intel_dig_port->port) {
4178                 case PORT_B:
4179                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4180                         break;
4181                 case PORT_C:
4182                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4183                         break;
4184                 case PORT_D:
4185                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4186                         break;
4187                 default:
4188                         return -EINVAL;
4189                 }
4190         } else {
4191                 switch (intel_dig_port->port) {
4192                 case PORT_B:
4193                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4194                         break;
4195                 case PORT_C:
4196                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4197                         break;
4198                 case PORT_D:
4199                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4200                         break;
4201                 default:
4202                         return -EINVAL;
4203                 }
4204         }
4205
4206         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4207                 return 0;
4208         return 1;
4209 }
4210
4211 static enum drm_connector_status
4212 g4x_dp_detect(struct intel_dp *intel_dp)
4213 {
4214         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4215         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4216         int ret;
4217
4218         /* Can't disconnect eDP, but you can close the lid... */
4219         if (is_edp(intel_dp)) {
4220                 enum drm_connector_status status;
4221
4222                 status = intel_panel_detect(dev);
4223                 if (status == connector_status_unknown)
4224                         status = connector_status_connected;
4225                 return status;
4226         }
4227
4228         ret = g4x_digital_port_connected(dev, intel_dig_port);
4229         if (ret == -EINVAL)
4230                 return connector_status_unknown;
4231         else if (ret == 0)
4232                 return connector_status_disconnected;
4233
4234         return intel_dp_detect_dpcd(intel_dp);
4235 }
4236
4237 static struct edid *
4238 intel_dp_get_edid(struct intel_dp *intel_dp)
4239 {
4240         struct intel_connector *intel_connector = intel_dp->attached_connector;
4241
4242         /* use cached edid if we have one */
4243         if (intel_connector->edid) {
4244                 /* invalid edid */
4245                 if (IS_ERR(intel_connector->edid))
4246                         return NULL;
4247
4248                 return drm_edid_duplicate(intel_connector->edid);
4249         } else
4250                 return drm_get_edid(&intel_connector->base,
4251                                     &intel_dp->aux.ddc);
4252 }
4253
4254 static void
4255 intel_dp_set_edid(struct intel_dp *intel_dp)
4256 {
4257         struct intel_connector *intel_connector = intel_dp->attached_connector;
4258         struct edid *edid;
4259
4260         edid = intel_dp_get_edid(intel_dp);
4261         intel_connector->detect_edid = edid;
4262
4263         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4264                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4265         else
4266                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4267 }
4268
4269 static void
4270 intel_dp_unset_edid(struct intel_dp *intel_dp)
4271 {
4272         struct intel_connector *intel_connector = intel_dp->attached_connector;
4273
4274         kfree(intel_connector->detect_edid);
4275         intel_connector->detect_edid = NULL;
4276
4277         intel_dp->has_audio = false;
4278 }
4279
4280 static enum intel_display_power_domain
4281 intel_dp_power_get(struct intel_dp *dp)
4282 {
4283         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4284         enum intel_display_power_domain power_domain;
4285
4286         power_domain = intel_display_port_power_domain(encoder);
4287         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4288
4289         return power_domain;
4290 }
4291
4292 static void
4293 intel_dp_power_put(struct intel_dp *dp,
4294                    enum intel_display_power_domain power_domain)
4295 {
4296         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4297         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4298 }
4299
/*
 * Connector .detect() hook: determine whether a sink is present.
 *
 * Drops any cached EDID, runs the platform-appropriate presence check,
 * then (if connected) probes OUI and MST and caches a fresh EDID. A sink
 * that goes into MST mode reports disconnected here, since it is exposed
 * through MST connectors instead. A power-domain reference is held across
 * all AUX/DDC traffic.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4356
/*
 * Connector .force() hook: refresh the cached EDID for a connector whose
 * status is being forced, without running full detection. Only re-reads
 * the EDID when the connector is (forced) connected, holding a power
 * domain reference around the DDC access.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4380
4381 static int intel_dp_get_modes(struct drm_connector *connector)
4382 {
4383         struct intel_connector *intel_connector = to_intel_connector(connector);
4384         struct edid *edid;
4385
4386         edid = intel_connector->detect_edid;
4387         if (edid) {
4388                 int ret = intel_connector_update_modes(connector, edid);
4389                 if (ret)
4390                         return ret;
4391         }
4392
4393         /* if eDP has no EDID, fall back to fixed mode */
4394         if (is_edp(intel_attached_dp(connector)) &&
4395             intel_connector->panel.fixed_mode) {
4396                 struct drm_display_mode *mode;
4397
4398                 mode = drm_mode_duplicate(connector->dev,
4399                                           intel_connector->panel.fixed_mode);
4400                 if (mode) {
4401                         drm_mode_probed_add(connector, mode);
4402                         return 1;
4403                 }
4404         }
4405
4406         return 0;
4407 }
4408
4409 static bool
4410 intel_dp_detect_audio(struct drm_connector *connector)
4411 {
4412         bool has_audio = false;
4413         struct edid *edid;
4414
4415         edid = to_intel_connector(connector)->detect_edid;
4416         if (edid)
4417                 has_audio = drm_detect_monitor_audio(edid);
4418
4419         return has_audio;
4420 }
4421
/*
 * Connector .set_property() hook: handles the force-audio and
 * broadcast-RGB properties, plus the panel scaling mode for eDP. Any
 * change that affects the output triggers a modeset restore on the
 * encoder's crtc. Returns 0 on success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op when nothing actually changes */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the effective range didn't change */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by restoring the mode on the active crtc */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4509
/*
 * Connector .destroy() hook: free the cached EDIDs, tear down eDP panel
 * state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The cached EDID may be an ERR_PTR marker rather than memory */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4528
/*
 * Encoder .destroy() hook: unregister the AUX channel, tear down MST
 * state and, for eDP, make sure VDD is really off and the reboot
 * notifier is gone before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4554
/*
 * Encoder suspend hook: for eDP, cancel the delayed VDD-off work and
 * synchronously turn VDD off so we don't suspend with the panel VDD
 * reference still held.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4571
/*
 * Bring the driver's VDD state tracking in line with the hardware when
 * the BIOS left panel VDD enabled at boot/resume. Must be called with
 * pps_mutex held.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4596
/*
 * Encoder .reset() hook: re-sync eDP power sequencer and VDD state with
 * whatever the BIOS left behind. Non-eDP encoders need no reset work.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4619
/* DRM connector callbacks for DP/eDP connectors */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4631
/* Probe-helper callbacks (mode enumeration/validation) for DP connectors */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4637
/* DRM encoder callbacks for DP/eDP encoders */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4642
/*
 * Intentionally empty hot-plug hook; DP hotplug work is done in the
 * hpd pulse path (see intel_dp_hpd_pulse()) rather than here.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4648
/*
 * intel_dp_hpd_pulse - service a hot plug detect interrupt on a DP port
 * @intel_dig_port: digital port that raised the interrupt
 * @long_hpd: true for a long HPD pulse, false for a short pulse
 *
 * A long pulse means the sink may have been (un)plugged: re-read the DPCD,
 * probe the OUI and re-probe MST support.  A short pulse services MST
 * sideband traffic, or rechecks link status for SST sinks.
 *
 * Returns IRQ_HANDLED when the pulse was consumed, IRQ_NONE when the
 * reprobe failed (in which case MST mode is also torn down if active).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Any non-eDP port is (re)classified as a regular DP output. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Keep the port powered while we talk to the sink over AUX. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Verify something is actually connected before reprobing. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4732
4733 /* Return which DP Port should be selected for Transcoder DP control */
4734 int
4735 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4736 {
4737         struct drm_device *dev = crtc->dev;
4738         struct intel_encoder *intel_encoder;
4739         struct intel_dp *intel_dp;
4740
4741         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4742                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4743
4744                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4745                     intel_encoder->type == INTEL_OUTPUT_EDP)
4746                         return intel_dp->output_reg;
4747         }
4748
4749         return -1;
4750 }
4751
4752 /* check the VBT to see whether the eDP is on DP-D port */
4753 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4754 {
4755         struct drm_i915_private *dev_priv = dev->dev_private;
4756         union child_device_config *p_child;
4757         int i;
4758         static const short port_mapping[] = {
4759                 [PORT_B] = PORT_IDPB,
4760                 [PORT_C] = PORT_IDPC,
4761                 [PORT_D] = PORT_IDPD,
4762         };
4763
4764         if (port == PORT_A)
4765                 return true;
4766
4767         if (!dev_priv->vbt.child_dev_num)
4768                 return false;
4769
4770         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4771                 p_child = dev_priv->vbt.child_dev + i;
4772
4773                 if (p_child->common.dvo_port == port_mapping[port] &&
4774                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4775                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4776                         return true;
4777         }
4778         return false;
4779 }
4780
4781 void
4782 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4783 {
4784         struct intel_connector *intel_connector = to_intel_connector(connector);
4785
4786         intel_attach_force_audio_property(connector);
4787         intel_attach_broadcast_rgb_property(connector);
4788         intel_dp->color_range_auto = true;
4789
4790         if (is_edp(intel_dp)) {
4791                 drm_mode_create_scaling_mode_property(connector->dev);
4792                 drm_object_attach_property(
4793                         &connector->base,
4794                         connector->dev->mode_config.scaling_mode_property,
4795                         DRM_MODE_SCALE_ASPECT);
4796                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4797         }
4798 }
4799
4800 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4801 {
4802         intel_dp->last_power_cycle = jiffies;
4803         intel_dp->last_power_on = jiffies;
4804         intel_dp->last_backlight_off = jiffies;
4805 }
4806
/*
 * Determine the eDP panel power sequencing delays.
 *
 * Reads the current delays from the power sequencer registers (in case the
 * BIOS programmed them) and from the VBT, takes the maximum of the two per
 * field, and falls back to the eDP spec limits when both are zero.  The
 * result is cached in intel_dp->pps_delays (hardware units of 100us) and
 * also converted to the millisecond-ish software delays used by the
 * wait helpers.  Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the register block: PCH-based vs VLV/CHV per-pipe registers. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* t11_t12 is in 100ms register units; scale to the common 100us. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hardware 100us units to the ms-scale sw delays. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4909
/*
 * Program the panel power sequencer registers from the delays previously
 * computed into intel_dp->pps_delays, including the reference clock
 * divisor and (where the hardware has them) the port select bits.
 * Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the register block: PCH-based vs VLV/CHV per-pipe registers. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back what actually landed in the registers for the log. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
4975
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclocked vrefresh selects the low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		/* Gen8+ (except CHV): switch RR by selecting the M1/N1 or
		 * M2/N2 link parameter set. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7: toggle the eDP RR mode switch bit in PIPECONF. */
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5079
5080 /**
5081  * intel_edp_drrs_enable - init drrs struct if supported
5082  * @intel_dp: DP struct
5083  *
5084  * Initializes frontbuffer_bits and drrs.dp
5085  */
5086 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5087 {
5088         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5089         struct drm_i915_private *dev_priv = dev->dev_private;
5090         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5091         struct drm_crtc *crtc = dig_port->base.base.crtc;
5092         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5093
5094         if (!intel_crtc->config->has_drrs) {
5095                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5096                 return;
5097         }
5098
5099         mutex_lock(&dev_priv->drrs.mutex);
5100         if (WARN_ON(dev_priv->drrs.dp)) {
5101                 DRM_ERROR("DRRS already enabled\n");
5102                 goto unlock;
5103         }
5104
5105         dev_priv->drrs.busy_frontbuffer_bits = 0;
5106
5107         dev_priv->drrs.dp = intel_dp;
5108
5109 unlock:
5110         mutex_unlock(&dev_priv->drrs.mutex);
5111 }
5112
5113 /**
5114  * intel_edp_drrs_disable - Disable DRRS
5115  * @intel_dp: DP struct
5116  *
5117  */
5118 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5119 {
5120         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5121         struct drm_i915_private *dev_priv = dev->dev_private;
5122         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5123         struct drm_crtc *crtc = dig_port->base.base.crtc;
5124         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5125
5126         if (!intel_crtc->config->has_drrs)
5127                 return;
5128
5129         mutex_lock(&dev_priv->drrs.mutex);
5130         if (!dev_priv->drrs.dp) {
5131                 mutex_unlock(&dev_priv->drrs.mutex);
5132                 return;
5133         }
5134
5135         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5136                 intel_dp_set_drrs_state(dev_priv->dev,
5137                         intel_dp->attached_connector->panel.
5138                         fixed_mode->vrefresh);
5139
5140         dev_priv->drrs.dp = NULL;
5141         mutex_unlock(&dev_priv->drrs.mutex);
5142
5143         cancel_delayed_work_sync(&dev_priv->drrs.work);
5144 }
5145
5146 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5147 {
5148         struct drm_i915_private *dev_priv =
5149                 container_of(work, typeof(*dev_priv), drrs.work.work);
5150         struct intel_dp *intel_dp;
5151
5152         mutex_lock(&dev_priv->drrs.mutex);
5153
5154         intel_dp = dev_priv->drrs.dp;
5155
5156         if (!intel_dp)
5157                 goto unlock;
5158
5159         /*
5160          * The delayed work can race with an invalidate hence we need to
5161          * recheck.
5162          */
5163
5164         if (dev_priv->drrs.busy_frontbuffer_bits)
5165                 goto unlock;
5166
5167         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5168                 intel_dp_set_drrs_state(dev_priv->dev,
5169                         intel_dp->attached_connector->panel.
5170                         downclock_mode->vrefresh);
5171
5172 unlock:
5173
5174         mutex_unlock(&dev_priv->drrs.mutex);
5175 }
5176
5177 /**
5178  * intel_edp_drrs_invalidate - Invalidate DRRS
5179  * @dev: DRM device
5180  * @frontbuffer_bits: frontbuffer plane tracking bits
5181  *
5182  * When there is a disturbance on screen (due to cursor movement/time
5183  * update etc), DRRS needs to be invalidated, i.e. need to switch to
5184  * high RR.
5185  *
5186  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5187  */
5188 void intel_edp_drrs_invalidate(struct drm_device *dev,
5189                 unsigned frontbuffer_bits)
5190 {
5191         struct drm_i915_private *dev_priv = dev->dev_private;
5192         struct drm_crtc *crtc;
5193         enum pipe pipe;
5194
5195         if (!dev_priv->drrs.dp)
5196                 return;
5197
5198         cancel_delayed_work_sync(&dev_priv->drrs.work);
5199
5200         mutex_lock(&dev_priv->drrs.mutex);
5201         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5202         pipe = to_intel_crtc(crtc)->pipe;
5203
5204         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5205                 intel_dp_set_drrs_state(dev_priv->dev,
5206                                 dev_priv->drrs.dp->attached_connector->panel.
5207                                 fixed_mode->vrefresh);
5208         }
5209
5210         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5211
5212         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5213         mutex_unlock(&dev_priv->drrs.mutex);
5214 }
5215
5216 /**
5217  * intel_edp_drrs_flush - Flush DRRS
5218  * @dev: DRM device
5219  * @frontbuffer_bits: frontbuffer plane tracking bits
5220  *
5221  * When there is no movement on screen, DRRS work can be scheduled.
5222  * This DRRS work is responsible for setting relevant registers after a
5223  * timeout of 1 second.
5224  *
5225  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5226  */
5227 void intel_edp_drrs_flush(struct drm_device *dev,
5228                 unsigned frontbuffer_bits)
5229 {
5230         struct drm_i915_private *dev_priv = dev->dev_private;
5231         struct drm_crtc *crtc;
5232         enum pipe pipe;
5233
5234         if (!dev_priv->drrs.dp)
5235                 return;
5236
5237         cancel_delayed_work_sync(&dev_priv->drrs.work);
5238
5239         mutex_lock(&dev_priv->drrs.mutex);
5240         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5241         pipe = to_intel_crtc(crtc)->pipe;
5242         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5243
5244         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5245                         !dev_priv->drrs.busy_frontbuffer_bits)
5246                 schedule_delayed_work(&dev_priv->drrs.work,
5247                                 msecs_to_jiffies(1000));
5248         mutex_unlock(&dev_priv->drrs.mutex);
5249 }
5250
5251 /**
5252  * DOC: Display Refresh Rate Switching (DRRS)
5253  *
5254  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5256  * dynamically, based on the usage scenario. This feature is applicable
5257  * for internal panels.
5258  *
5259  * Indication that the panel supports DRRS is given by the panel EDID, which
5260  * would list multiple refresh rates for one resolution.
5261  *
5262  * DRRS is of 2 types - static and seamless.
5263  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5264  * (may appear as a blink on screen) and is used in dock-undock scenario.
5265  * Seamless DRRS involves changing RR without any visual effect to the user
5266  * and can be used during normal system usage. This is done by programming
5267  * certain registers.
5268  *
5269  * Support for static/seamless DRRS may be indicated in the VBT based on
5270  * inputs from the panel spec.
5271  *
5272  * DRRS saves power by switching to low RR based on usage scenarios.
5273  *
5274  * eDP DRRS:-
5275  *        The implementation is based on frontbuffer tracking implementation.
5276  * When there is a disturbance on the screen triggered by user activity or a
5277  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5278  * When there is no movement on screen, after a timeout of 1 second, a switch
5279  * to low RR is made.
5280  *        For integration with frontbuffer tracking code,
5281  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5282  *
5283  * DRRS can be further extended to support other internal panels and also
5284  * the scenario of video playback wherein RR is set based on the rate
5285  * requested by userspace.
5286  */
5287
5288 /**
5289  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5290  * @intel_connector: eDP connector
5291  * @fixed_mode: preferred mode of panel
5292  *
5293  * This function is  called only once at driver load to initialize basic
5294  * DRRS stuff.
5295  *
5296  * Returns:
5297  * Downclock mode if panel supports it, else return NULL.
5298  * DRRS support is determined by the presence of downclock mode (apart
5299  * from VBT setting).
5300  */
5301 static struct drm_display_mode *
5302 intel_dp_drrs_init(struct intel_connector *intel_connector,
5303                 struct drm_display_mode *fixed_mode)
5304 {
5305         struct drm_connector *connector = &intel_connector->base;
5306         struct drm_device *dev = connector->dev;
5307         struct drm_i915_private *dev_priv = dev->dev_private;
5308         struct drm_display_mode *downclock_mode = NULL;
5309
5310         if (INTEL_INFO(dev)->gen <= 6) {
5311                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5312                 return NULL;
5313         }
5314
5315         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5316                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5317                 return NULL;
5318         }
5319
5320         downclock_mode = intel_find_panel_downclock
5321                                         (dev, fixed_mode, connector);
5322
5323         if (!downclock_mode) {
5324                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5325                 return NULL;
5326         }
5327
5328         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5329
5330         mutex_init(&dev_priv->drrs.mutex);
5331
5332         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5333
5334         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5335         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5336         return downclock_mode;
5337 }
5338
/*
 * Finish connector initialization for eDP: sanitize the VDD state, cache
 * the DPCD and EDID, program the power sequencer registers, pick a fixed
 * panel mode (EDID preferred, VBT fallback), set up DRRS and the
 * backlight.  Returns false if the DPCD cannot be read (panel is treated
 * as a ghost), true otherwise.  For non-eDP outputs it is a no-op that
 * returns true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		/* DPCD 1.1+ advertises no-AUX-handshake link training. */
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID was read but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a real EDID or an ERR_PTR recording why it's missing. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5445
/*
 * intel_dp_init_connector - initialize the connector for one DP/eDP port
 * @intel_dig_port: digital port the connector hangs off
 * @intel_connector: pre-allocated connector to set up
 *
 * Wires up the per-platform AUX vfuncs, registers the DRM connector,
 * assigns the hotplug pin, initializes the panel power sequencer (eDP
 * only), brings up the AUX channel and MST support, and finally probes
 * the eDP panel.
 *
 * Returns true on success. Returns false if eDP port validation fails or
 * the eDP connector init fails; in the latter case everything registered
 * here is torn down again before returning.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick the AUX clock divider per platform/gen */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* gen9+ has its own AUX send-control register layout */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms route connector state readout through the DDI code */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		/* every DP-capable port must map to one of the pins above */
		BUG();
	}

	/* eDP: initialize the panel power sequencer state under the pps lock */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	/* Probe the eDP panel; on failure unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
5589
/*
 * intel_dp_init - allocate and register a DP encoder + connector for @port
 * @dev: drm device
 * @output_reg: the DP port control register for this port
 * @port: the port enum (A-D)
 *
 * Allocates the digital port and connector, installs the per-platform
 * encoder hooks, records the port in hpd_irq_port[] for hotplug handling,
 * and hands off to intel_dp_init_connector(). On connector init failure
 * the encoder and both allocations are cleaned up; the function returns
 * silently in that case (and on allocation failure).
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* common encoder hooks */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* platform-specific enable/disable sequences (chv, vlv, g4x/ilk+) */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		/* on CHV port D is restricted to the third pipe only */
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	/* register for long/short HPD pulse handling on this port */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
5661
5662 void intel_dp_mst_suspend(struct drm_device *dev)
5663 {
5664         struct drm_i915_private *dev_priv = dev->dev_private;
5665         int i;
5666
5667         /* disable MST */
5668         for (i = 0; i < I915_MAX_PORTS; i++) {
5669                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5670                 if (!intel_dig_port)
5671                         continue;
5672
5673                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5674                         if (!intel_dig_port->dp.can_mst)
5675                                 continue;
5676                         if (intel_dig_port->dp.is_mst)
5677                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5678                 }
5679         }
5680 }
5681
5682 void intel_dp_mst_resume(struct drm_device *dev)
5683 {
5684         struct drm_i915_private *dev_priv = dev->dev_private;
5685         int i;
5686
5687         for (i = 0; i < I915_MAX_PORTS; i++) {
5688                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5689                 if (!intel_dig_port)
5690                         continue;
5691                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5692                         int ret;
5693
5694                         if (!intel_dig_port->dp.can_mst)
5695                                 continue;
5696
5697                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5698                         if (ret != 0) {
5699                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5700                         }
5701                 }
5702         }
5703 }