Merge tag 'drm-intel-fixes-2015-05-08' of git://anongit.freedesktop.org/drm-intel...
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
/*
 * Pairs a DP link rate (a DP_LINK_BW_* value) with the DPLL divider
 * settings that produce that rate on a given platform.
 */
struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};
48
/* g4x DPLL divider settings for the standard DP link rates */
static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (ILK+) DPLL divider settings */
static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* VLV DPLL divider settings */
static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Skylake supports following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* CHV link rates, including the eDP 1.4 intermediate rates */
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
/* RBR / HBR / HBR2 — the rates common to all DP sources */
static const int default_rates[] = { 162000, 270000, 540000 };
94
95 /**
96  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97  * @intel_dp: DP struct
98  *
99  * If a CPU or PCH DP output is attached to an eDP panel, this function
100  * will return true, and false otherwise.
101  */
102 static bool is_edp(struct intel_dp *intel_dp)
103 {
104         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107 }
108
109 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
110 {
111         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113         return intel_dig_port->base.base.dev;
114 }
115
116 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117 {
118         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
119 }
120
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);
127
128 static int
129 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
130 {
131         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133         switch (max_link_bw) {
134         case DP_LINK_BW_1_62:
135         case DP_LINK_BW_2_7:
136         case DP_LINK_BW_5_4:
137                 break;
138         default:
139                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140                      max_link_bw);
141                 max_link_bw = DP_LINK_BW_1_62;
142                 break;
143         }
144         return max_link_bw;
145 }
146
/*
 * Max usable lane count for this link: min of what the source port
 * can drive and what the sink reports in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/* DDI port A without the DDI_A_4_LANES strap only has 2 lanes */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
162
163 /*
164  * The units on the numbers in the next two are... bizarre.  Examples will
165  * make it clearer; this one parallels an example in the eDP spec.
166  *
167  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168  *
169  *     270000 * 1 * 8 / 10 == 216000
170  *
171  * The actual data capacity of that configuration is 2.16Gbit/s, so the
172  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
173  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174  * 119000.  At 18bpp that's 2142000 kilobits per second.
175  *
176  * Thus the strange-looking division by 10 in intel_dp_link_required, to
177  * get the result in decakilobits instead of kilobits.
178  */
179
/*
 * Bandwidth needed by @pixel_clock (kHz) at @bpp bits per pixel, in
 * decakilobits/s (hence the round-up divide by 10 — see the units
 * discussion in the comment block above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
185
/*
 * Max payload data rate for @max_lanes lanes at @max_link_clock, in
 * decakilobits/s.  The 8/10 factor accounts for 8b/10b channel coding
 * overhead on the DP link.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total_link_clock = max_link_clock * max_lanes;

	return total_link_clock * 8 / 10;
}
191
/*
 * Check whether @mode can be driven over this DP link, given the max
 * link rate/lane count and, for eDP, the panel's fixed mode.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* An eDP panel can't show anything larger than its fixed mode */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* the panel is always driven at its native clock */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp is the lowest bpp considered; see the units comment above
	 * intel_dp_link_required() */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
229
/*
 * Pack up to four bytes of AUX payload into one register word,
 * big-endian: src[0] lands in the most significant byte.  More than
 * four input bytes are silently ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int n = src_bytes > 4 ? 4 : src_bytes;
	uint32_t packed = 0;
	int shift = 24;

	while (n-- > 0) {
		packed |= ((uint32_t)*src++) << shift;
		shift -= 8;
	}

	return packed;
}
241
/*
 * Unpack a register word into up to four AUX payload bytes, big-endian:
 * the most significant byte of @src becomes dst[0].  Requests for more
 * than four bytes are clamped to four.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int n = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
250
/*
 * hrawclock is 1/4 the FSB frequency.
 * Returns the hrawclk frequency in MHz, decoded from the CLKCFG
 * register's FSB field.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* unknown encoding: fall back to 133 MHz */
		return 133;
	}
}
284
/* Forward declarations: panel power sequencer init, defined later. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
291
/*
 * Take pps_mutex for @intel_dp, grabbing a power domain reference for
 * the port first.  The get must happen outside the mutex; see the
 * ordering comment in vlv_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
309
/*
 * Release pps_mutex, then drop the power domain reference taken in
 * pps_lock().  The put must happen after the mutex is released.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
323
324 static void
325 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326 {
327         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328         struct drm_device *dev = intel_dig_port->base.base.dev;
329         struct drm_i915_private *dev_priv = dev->dev_private;
330         enum pipe pipe = intel_dp->pps_pipe;
331         bool pll_enabled;
332         uint32_t DP;
333
334         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336                  pipe_name(pipe), port_name(intel_dig_port->port)))
337                 return;
338
339         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340                       pipe_name(pipe), port_name(intel_dig_port->port));
341
342         /* Preserve the BIOS-computed detected bit. This is
343          * supposed to be read-only.
344          */
345         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347         DP |= DP_PORT_WIDTH(1);
348         DP |= DP_LINK_TRAIN_PAT_1;
349
350         if (IS_CHERRYVIEW(dev))
351                 DP |= DP_PIPE_SELECT_CHV(pipe);
352         else if (pipe == PIPE_B)
353                 DP |= DP_PIPEB_SELECT;
354
355         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357         /*
358          * The DPLL for the pipe must be enabled for this to work.
359          * So enable temporarily it if it's not already enabled.
360          */
361         if (!pll_enabled)
362                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
365         /*
366          * Similar magic as in intel_dp_enable_port().
367          * We _must_ do this port enable + disable trick
368          * to make this power seqeuencer lock onto the port.
369          * Otherwise even VDD force bit won't work.
370          */
371         I915_WRITE(intel_dp->output_reg, DP);
372         POSTING_READ(intel_dp->output_reg);
373
374         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375         POSTING_READ(intel_dp->output_reg);
376
377         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378         POSTING_READ(intel_dp->output_reg);
379
380         if (!pll_enabled)
381                 vlv_force_pll_off(dev, pipe);
382 }
383
/*
 * Return the pipe whose power sequencer drives @intel_dp, assigning
 * one if the port has none yet: pick a pipe not claimed by any other
 * eDP port, steal it from whatever is currently using it, program its
 * registers and kick it so it locks onto this port.
 *
 * Caller must hold pps_mutex.  Only valid for eDP ports.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* candidate mask: only pipes A and B have power sequencers */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
447
/* Predicate on a pipe's power sequencer state; see vlv_initial_pps_pipe() */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Is panel power on according to @pipe's power sequencer status? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
456
/* Is VDD force enabled in @pipe's power sequencer control register? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
462
/* Accept any pipe: last-resort predicate for vlv_initial_pps_pipe() */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
468
/*
 * Scan pipes A/B for a power sequencer whose port-select field matches
 * @port and that also satisfies @pipe_check.  Returns the first match,
 * or INVALID_PIPE if there is none.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
491
/*
 * At init time, figure out which power sequencer (if any) the BIOS
 * left driving this port and adopt it: prefer one with the panel on,
 * then one with VDD on, then any with the matching port select.
 * Leaves pps_pipe INVALID_PIPE when nothing matches, in which case
 * vlv_power_sequencer_pipe() will pick one lazily.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
528
/*
 * Invalidate the pps_pipe of every eDP encoder so a power sequencer
 * gets re-picked and reprogrammed on next use.  VLV/CHV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
557
558 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559 {
560         struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562         if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_CONTROL;
564         else
565                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566 }
567
568 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569 {
570         struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572         if (HAS_PCH_SPLIT(dev))
573                 return PCH_PP_STATUS;
574         else
575                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576 }
577
/*
 * Reboot notifier handler: shut down panel power so the T12 power-cycle
 * timing is guaranteed across a restart.  Only applicable when the
 * panel PM state is not otherwise tracked (only VLV is handled here).
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* only eDP, and only for an actual restart */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* wait out the full power cycle before letting reboot proceed */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
613
/*
 * Is panel power currently on (PP_ON set in the status register)?
 * On VLV, reports off when no power sequencer has been assigned yet,
 * since there is no per-pipe register to read in that case.
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
627
/*
 * Is VDD force currently enabled in the panel power control register?
 * On VLV, reports off when no power sequencer has been assigned yet.
 * Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
641
/*
 * Sanity check before AUX channel use on eDP: warn (and dump the PPS
 * registers) if the panel has neither panel power nor VDD force on,
 * since AUX communication needs one of them.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
658
/*
 * Wait for the in-flight AUX transaction to finish, either by sleeping
 * on the AUX-done interrupt (10 ms timeout) or by busy-polling the
 * SEND_BUSY bit.  Returns the final AUX_CH_CTL status word; logs an
 * error if the hardware never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the status register and tests that SEND_BUSY has cleared */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
682
/*
 * AUX clock divider for pre-ILK hardware.  Only a single divider
 * (index 0) is provided; further indices return 0 to end iteration.
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
694
/*
 * AUX clock divider for ILK+.  Port A (CPU eDP) runs off the fixed eDP
 * input clock; other ports run off the PCH rawclk.  Single divider
 * (index 0) only.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
712
/*
 * AUX clock divider for HSW/BDW.  Port A (eDP) derives the divider
 * from cdclk; non-ULT HSW (LPT PCH) uses two hardcoded workaround
 * values; everything else runs off the PCH rawclk.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
734
/* AUX clock divider for VLV: a single fixed divider of 100 (index 0). */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
739
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code: a
 * dummy divider of 1 for index 0, then 0 to end iteration.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
749
/*
 * Assemble the AUX_CH_CTL value that starts an AUX transfer on pre-SKL
 * hardware: busy/done/error bits, optional interrupt enable, timeout,
 * message size, precharge time and the 2x bit clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* GEN6 uses a precharge count of 3; later gens use 5 */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (AUX channel A) gets a longer 600us timeout */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
779
/*
 * Assemble the AUX_CH_CTL value for SKL+: no clock divider or precharge
 * fields (hardware handles those); uses a fixed 1600us timeout and a
 * 32-cycle sync pulse.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
794
/*
 * Perform one raw AUX channel transaction: send @send_bytes bytes from
 * @send, then read up to @recv_size bytes of the reply into @recv.
 *
 * Returns the number of bytes received, or a negative error code:
 * -EBUSY if the channel never went idle or the send never completed,
 * -EIO on a receive error, -ETIMEDOUT when the sink did not answer
 * (typically: nothing connected), -E2BIG for oversized transfers.
 *
 * Takes pps_mutex and ensures VDD is up for the duration of the call.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow AUX_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at each clock divider the platform offers. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
934
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer() hook: marshal a drm_dp_aux_msg into the raw
 * byte stream intel_dp_aux_ch() expects (4-byte header, then payload)
 * and unpack the reply. Returns bytes transferred or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];	/* 5 data registers * 4 bytes each */
	size_t txsize, rxsize;
	int ret;

	/* AUX header: request nibble + 20-bit address, then length - 1 */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* reply byte + payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1005
/*
 * Initialize the AUX channel for this port: pick the AUX_CH_CTL
 * register, register the drm_dp_aux helper (whose DDC i2c adapter is
 * then linked to the connector in sysfs). Registration failures are
 * logged but not propagated.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		/* No other port should ever reach DP init. */
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC adapter under the connector's sysfs directory. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1070
1071 static void
1072 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1073 {
1074         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1075
1076         if (!intel_connector->mst_port)
1077                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1078                                   intel_dp->aux.ddc.dev.kobj.name);
1079         intel_connector_unregister(intel_connector);
1080 }
1081
1082 static void
1083 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1084 {
1085         u32 ctrl1;
1086
1087         pipe_config->ddi_pll_sel = SKL_DPLL0;
1088         pipe_config->dpll_hw_state.cfgcr1 = 0;
1089         pipe_config->dpll_hw_state.cfgcr2 = 0;
1090
1091         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1092         switch (link_clock / 2) {
1093         case 81000:
1094                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1095                                               SKL_DPLL0);
1096                 break;
1097         case 135000:
1098                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1099                                               SKL_DPLL0);
1100                 break;
1101         case 270000:
1102                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1103                                               SKL_DPLL0);
1104                 break;
1105         case 162000:
1106                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1107                                               SKL_DPLL0);
1108                 break;
1109         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1110         results in CDCLK change. Need to handle the change of CDCLK by
1111         disabling pipes and re-enabling them */
1112         case 108000:
1113                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1114                                               SKL_DPLL0);
1115                 break;
1116         case 216000:
1117                 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1118                                               SKL_DPLL0);
1119                 break;
1120
1121         }
1122         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1123 }
1124
1125 static void
1126 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1127 {
1128         switch (link_bw) {
1129         case DP_LINK_BW_1_62:
1130                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1131                 break;
1132         case DP_LINK_BW_2_7:
1133                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1134                 break;
1135         case DP_LINK_BW_5_4:
1136                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1137                 break;
1138         }
1139 }
1140
1141 static int
1142 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1143 {
1144         if (intel_dp->num_sink_rates) {
1145                 *sink_rates = intel_dp->sink_rates;
1146                 return intel_dp->num_sink_rates;
1147         }
1148
1149         *sink_rates = default_rates;
1150
1151         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1152 }
1153
1154 static int
1155 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1156 {
1157         if (INTEL_INFO(dev)->gen >= 9) {
1158                 *source_rates = gen9_rates;
1159                 return ARRAY_SIZE(gen9_rates);
1160         } else if (IS_CHERRYVIEW(dev)) {
1161                 *source_rates = chv_rates;
1162                 return ARRAY_SIZE(chv_rates);
1163         }
1164
1165         *source_rates = default_rates;
1166
1167         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1168                 /* WaDisableHBR2:skl */
1169                 return (DP_LINK_BW_2_7 >> 3) + 1;
1170         else if (INTEL_INFO(dev)->gen >= 8 ||
1171             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1172                 return (DP_LINK_BW_5_4 >> 3) + 1;
1173         else
1174                 return (DP_LINK_BW_2_7 >> 3) + 1;
1175 }
1176
1177 static void
1178 intel_dp_set_clock(struct intel_encoder *encoder,
1179                    struct intel_crtc_state *pipe_config, int link_bw)
1180 {
1181         struct drm_device *dev = encoder->base.dev;
1182         const struct dp_link_dpll *divisor = NULL;
1183         int i, count = 0;
1184
1185         if (IS_G4X(dev)) {
1186                 divisor = gen4_dpll;
1187                 count = ARRAY_SIZE(gen4_dpll);
1188         } else if (HAS_PCH_SPLIT(dev)) {
1189                 divisor = pch_dpll;
1190                 count = ARRAY_SIZE(pch_dpll);
1191         } else if (IS_CHERRYVIEW(dev)) {
1192                 divisor = chv_dpll;
1193                 count = ARRAY_SIZE(chv_dpll);
1194         } else if (IS_VALLEYVIEW(dev)) {
1195                 divisor = vlv_dpll;
1196                 count = ARRAY_SIZE(vlv_dpll);
1197         }
1198
1199         if (divisor && count) {
1200                 for (i = 0; i < count; i++) {
1201                         if (link_bw == divisor[i].link_bw) {
1202                                 pipe_config->dpll = divisor[i].dpll;
1203                                 pipe_config->clock_set = true;
1204                                 break;
1205                         }
1206                 }
1207         }
1208 }
1209
1210 static int intersect_rates(const int *source_rates, int source_len,
1211                            const int *sink_rates, int sink_len,
1212                            int *common_rates)
1213 {
1214         int i = 0, j = 0, k = 0;
1215
1216         while (i < source_len && j < sink_len) {
1217                 if (source_rates[i] == sink_rates[j]) {
1218                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1219                                 return k;
1220                         common_rates[k] = source_rates[i];
1221                         ++k;
1222                         ++i;
1223                         ++j;
1224                 } else if (source_rates[i] < sink_rates[j]) {
1225                         ++i;
1226                 } else {
1227                         ++j;
1228                 }
1229         }
1230         return k;
1231 }
1232
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates;
	const int *sink_rates;
	int source_len, sink_len;

	/* Gather both ends' rate tables, then intersect them.
	 * common_rates must hold DP_MAX_SUPPORTED_RATES entries. */
	source_len = intel_dp_source_rates(dev, &source_rates);
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1247
/*
 * Format @nelem integers from @array into @str as a comma-terminated
 * list ("1,2,3,"). Output stops silently once @len is exhausted; @str
 * is always NUL-terminated (for len > 0).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		/*
		 * Stop on output error or truncation. The old implicit
		 * int/size_t comparison only caught a negative return by
		 * accident of unsigned conversion; make it explicit.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1263
1264 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1265 {
1266         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1267         const int *source_rates, *sink_rates;
1268         int source_len, sink_len, common_len;
1269         int common_rates[DP_MAX_SUPPORTED_RATES];
1270         char str[128]; /* FIXME: too big for stack? */
1271
1272         if ((drm_debug & DRM_UT_KMS) == 0)
1273                 return;
1274
1275         source_len = intel_dp_source_rates(dev, &source_rates);
1276         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1277         DRM_DEBUG_KMS("source rates: %s\n", str);
1278
1279         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1280         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1281         DRM_DEBUG_KMS("sink rates: %s\n", str);
1282
1283         common_len = intel_dp_common_rates(intel_dp, common_rates);
1284         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1285         DRM_DEBUG_KMS("common rates: %s\n", str);
1286 }
1287
1288 static int rate_to_index(int find, const int *rates)
1289 {
1290         int i = 0;
1291
1292         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1293                 if (find == rates[i])
1294                         break;
1295
1296         return i;
1297 }
1298
1299 int
1300 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1301 {
1302         int rates[DP_MAX_SUPPORTED_RATES] = {};
1303         int len;
1304
1305         len = intel_dp_common_rates(intel_dp, rates);
1306         if (WARN_ON(len <= 0))
1307                 return 162000;
1308
1309         return rates[rate_to_index(0, rates) - 1];
1310 }
1311
/*
 * Map @rate to its index in the sink's advertised rate table.
 * Only meaningful when intel_dp->num_sink_rates is non-zero (see
 * intel_dp_compute_config()).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1316
/*
 * Compute the DP link configuration (bpp, lane count, link clock) for
 * the requested mode and fill in @pipe_config accordingly. Returns
 * false when no combination of link parameters can carry the mode.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	/* eDP with a fixed panel mode: adopt it and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are rejected outright. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Prefer the highest bpp; within it, the cheapest clock/lane combo. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	/* Sinks with a rate table select the rate by index, not BW code. */
	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m/n values for seamless downclocking (DRRS). */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Pick the PLL configuration matching the chosen link clock. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1475
/*
 * Select the CPU eDP PLL frequency (160 vs 270 MHz) in DP_A based on
 * the current port clock, keeping the cached intel_dp->DP value in
 * sync with what gets written to the register.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* NOTE(review): delay presumably allows the PLL to settle --
	 * the requirement isn't documented here. */
	udelay(500);
}
1506
/*
 * Compute the DP port register value for the current mode and cache it
 * in intel_dp->DP. Nothing is written to the hardware here -- only the
 * existing DP_DETECTED bit is read back and preserved.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *      IBX PCH
	 *      SNB CPU
	 *      IVB CPU
	 *      CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		/* Pipe select lives at bit 29 in this register layout. */
		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a third pipe, hence a wider select field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1581
1582 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1583 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1584
1585 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1586 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1587
1588 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1589 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1590
/*
 * Poll the panel power-sequencer status register until
 * (status & @mask) == @value, giving up with an error log after the
 * _wait_for() timeout (5000 with a 10 poll interval -- presumably ms,
 * per that macro's convention). Timeout is logged but not propagated.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1617
/* Wait until the sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1623
/* Wait until the sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1629
/*
 * Wait until the panel may be powered on again: enforce the software-tracked
 * power cycle delay since the last power-down, then let the hardware
 * sequencer confirm it has reached the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1641
/* Enforce the required delay between panel power on and backlight enable. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1647
/* Enforce the required delay after the backlight was last switched off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1653
1654 /* Read the current pp_control value, unlocking the register if it
1655  * is locked
1656  */
1657
1658 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1659 {
1660         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661         struct drm_i915_private *dev_priv = dev->dev_private;
1662         u32 control;
1663
1664         lockdep_assert_held(&dev_priv->pps_mutex);
1665
1666         control = I915_READ(_pp_ctrl_reg(intel_dp));
1667         control &= ~PANEL_UNLOCK_MASK;
1668         control |= PANEL_UNLOCK_REGS;
1669         return control;
1670 }
1671
1672 /*
1673  * Must be paired with edp_panel_vdd_off().
1674  * Must hold pps_mutex around the whole on/off sequence.
1675  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676  */
1677 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1678 {
1679         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1680         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1682         struct drm_i915_private *dev_priv = dev->dev_private;
1683         enum intel_display_power_domain power_domain;
1684         u32 pp;
1685         u32 pp_stat_reg, pp_ctrl_reg;
1686         bool need_to_disable = !intel_dp->want_panel_vdd;
1687
1688         lockdep_assert_held(&dev_priv->pps_mutex);
1689
1690         if (!is_edp(intel_dp))
1691                 return false;
1692
1693         cancel_delayed_work(&intel_dp->panel_vdd_work);
1694         intel_dp->want_panel_vdd = true;
1695
1696         if (edp_have_panel_vdd(intel_dp))
1697                 return need_to_disable;
1698
1699         power_domain = intel_display_port_power_domain(intel_encoder);
1700         intel_display_power_get(dev_priv, power_domain);
1701
1702         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703                       port_name(intel_dig_port->port));
1704
1705         if (!edp_have_panel_power(intel_dp))
1706                 wait_panel_power_cycle(intel_dp);
1707
1708         pp = ironlake_get_pp_control(intel_dp);
1709         pp |= EDP_FORCE_VDD;
1710
1711         pp_stat_reg = _pp_stat_reg(intel_dp);
1712         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1713
1714         I915_WRITE(pp_ctrl_reg, pp);
1715         POSTING_READ(pp_ctrl_reg);
1716         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1718         /*
1719          * If the panel wasn't on, delay before accessing aux channel
1720          */
1721         if (!edp_have_panel_power(intel_dp)) {
1722                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723                               port_name(intel_dig_port->port));
1724                 msleep(intel_dp->panel_power_up_delay);
1725         }
1726
1727         return need_to_disable;
1728 }
1729
1730 /*
1731  * Must be paired with intel_edp_panel_vdd_off() or
1732  * intel_edp_panel_off().
1733  * Nested calls to these functions are not allowed since
1734  * we drop the lock. Caller must use some higher level
1735  * locking to prevent nested calls from other threads.
1736  */
1737 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1738 {
1739         bool vdd;
1740
1741         if (!is_edp(intel_dp))
1742                 return;
1743
1744         pps_lock(intel_dp);
1745         vdd = edp_panel_vdd_on(intel_dp);
1746         pps_unlock(intel_dp);
1747
1748         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1749              port_name(dp_to_dig_port(intel_dp)->port));
1750 }
1751
/*
 * Immediately remove the VDD force-on override and drop the power domain
 * reference taken in edp_panel_vdd_on().  No-op if VDD is not currently
 * forced on in hardware.  Requires pps_mutex; WARNs if someone still
 * wants VDD on.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with panel power already off starts a power cycle;
	 * record when, so wait_panel_power_cycle() can enforce the delay. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Pairs with the get in edp_panel_vdd_on(). */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1792
/*
 * Deferred VDD off handler: turns VDD off unless someone re-requested it
 * between scheduling and execution of the work.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1803
1804 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805 {
1806         unsigned long delay;
1807
1808         /*
1809          * Queue the timer to fire a long time from now (relative to the power
1810          * down delay) to keep the panel power up across a sequence of
1811          * operations.
1812          */
1813         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815 }
1816
1817 /*
1818  * Must be paired with edp_panel_vdd_on().
1819  * Must hold pps_mutex around the whole on/off sequence.
1820  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821  */
1822 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1823 {
1824         struct drm_i915_private *dev_priv =
1825                 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827         lockdep_assert_held(&dev_priv->pps_mutex);
1828
1829         if (!is_edp(intel_dp))
1830                 return;
1831
1832         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1833              port_name(dp_to_dig_port(intel_dp)->port));
1834
1835         intel_dp->want_panel_vdd = false;
1836
1837         if (sync)
1838                 edp_panel_vdd_off_sync(intel_dp);
1839         else
1840                 edp_panel_vdd_schedule_off(intel_dp);
1841 }
1842
/*
 * Turn panel power on and wait until the sequencer reports the panel as on.
 * Requires pps_mutex; WARNs and bails if panel power is already enabled.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the minimum off time before powering back up. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used by wait_backlight_on() to pace backlight enable. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1890
/* Lock-taking wrapper around edp_panel_on() for external callers. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1900
1901
/*
 * Turn panel power off and wait for the sequencer to finish.  Requires
 * pps_mutex and an active VDD request (WARNs otherwise); the VDD power
 * domain reference is dropped here.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD goes down together with panel power in the write below. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start of the power cycle delay enforced by wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1943
/* Lock-taking wrapper around edp_panel_off() for external callers. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1953
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set the backlight enable bit in PP_CONTROL. */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
1983
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
1995
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear the backlight enable bit in PP_CONTROL. */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record when the backlight went off and honour the off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2022
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of intel_edp_backlight_on(): PP control, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2034
2035 /*
2036  * Hook for controlling the panel power control backlight through the bl_power
2037  * sysfs attribute. Take care to handle multiple calls.
2038  */
2039 static void intel_edp_backlight_power(struct intel_connector *connector,
2040                                       bool enable)
2041 {
2042         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2043         bool is_enabled;
2044
2045         pps_lock(intel_dp);
2046         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2047         pps_unlock(intel_dp);
2048
2049         if (is_enabled == enable)
2050                 return;
2051
2052         DRM_DEBUG_KMS("panel power control backlight %s\n",
2053                       enable ? "enable" : "disable");
2054
2055         if (enable)
2056                 _intel_edp_backlight_on(intel_dp);
2057         else
2058                 _intel_edp_backlight_off(intel_dp);
2059 }
2060
/*
 * Enable the eDP PLL via DP_A.  The pipe must still be disabled, and the
 * PLL and port must both be off on entry (asserted via WARNs).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* 200 us settle time -- presumably the PLL lock time; confirm
	 * against the hardware documentation. */
	udelay(200);
}
2086
/*
 * Disable the eDP PLL via DP_A.  The pipe must already be disabled and the
 * port off, while the PLL is expected to still be running (WARNs otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* 200 us settle time after disabling the PLL. */
	udelay(200);
}
2111
2112 /* If the sink supports it, try to set the power state appropriately */
2113 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2114 {
2115         int ret, i;
2116
2117         /* Should have a valid DPCD by this point */
2118         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119                 return;
2120
2121         if (mode != DRM_MODE_DPMS_ON) {
2122                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123                                          DP_SET_POWER_D3);
2124         } else {
2125                 /*
2126                  * When turning on, we need to retry for 1ms to give the sink
2127                  * time to wake up.
2128                  */
2129                 for (i = 0; i < 3; i++) {
2130                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131                                                  DP_SET_POWER_D0);
2132                         if (ret == 1)
2133                                 break;
2134                         msleep(1);
2135                 }
2136         }
2137
2138         if (ret != 1)
2139                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2141 }
2142
/*
 * Read out whether this DP port is enabled in hardware and, if so, which
 * pipe drives it (*pipe).  Returns false when the port's power domain or
 * the port itself is off.  On CPT PCH (non port A) the pipe selection
 * lives in the transcoder DP control registers, so those are scanned for
 * the entry selecting this port.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			/* Unknown output reg: report enabled without
			 * touching *pipe. */
			return true;
		}

		for_each_pipe(dev_priv, i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		/* Port is enabled but no transcoder selects it; *pipe is
		 * left unchanged in this case. */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
2201
/*
 * Fill pipe_config from the current hardware state: sync polarity flags
 * (from the port register, or the transcoder DP register on CPT), audio
 * and color range, link m/n values, the port clock (for port A read from
 * the DP_A PLL frequency select) and the derived dot clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Sync polarity comes from the port register itself. */
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* On CPT the polarity lives in the transcoder DP register. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* Port A link rate is encoded in the DP_A PLL freq select. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2285
/*
 * Encoder disable hook: tears things down in the required order --
 * audio, PSR, then backlight/sink/panel power (under a VDD reference),
 * and finally, on pre-ILK hardware, the port itself.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2309
/*
 * ILK+ post-disable: take the link down after the pipe is off, and for
 * port A also turn the eDP PLL off.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2319
/* VLV post-disable: just take the link down once the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2326
/*
 * CHV post-disable: take the link down, then put the PHY data lanes of
 * this port's DPIO channel into reset via sideband register writes.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->dpio_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert the lane reset on both PCS register banks. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2362
/*
 * Program the requested link training pattern (and scrambling disable bit)
 * for this port.  On DDI platforms this writes DP_TP_CTL directly; on the
 * CPT and g4x/vlv/chv paths it only updates *DP -- the caller is
 * responsible for writing that value to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT has no pattern 3 encoding; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV can encode pattern 3 on this path. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2445
/*
 * Write intel_dp->DP out to the port register to enable the port,
 * priming the link with training pattern 1 first.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        /* First write: full port configuration but with DP_PORT_EN
         * still clear. */
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2469
/*
 * Common DP enable path: turn the port on, run the eDP panel power
 * sequence, perform link training, then enable audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        /* The port must not already be enabled. */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        /* VLV/CHV need a power sequencer assigned to this port first */
        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* eDP panel power on, keeping vdd only as long as needed */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

        /* Wake the sink, then train the link */
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2508
2509 static void g4x_enable_dp(struct intel_encoder *encoder)
2510 {
2511         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
2513         intel_enable_dp(encoder);
2514         intel_edp_backlight_on(intel_dp);
2515 }
2516
2517 static void vlv_enable_dp(struct intel_encoder *encoder)
2518 {
2519         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
2521         intel_edp_backlight_on(intel_dp);
2522         intel_psr_enable(intel_dp);
2523 }
2524
/*
 * g4x/ilk pre-enable hook: program the port register and, for the CPU
 * eDP port (port A), set up and turn on the eDP PLL.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

        intel_dp_prepare(encoder);

        /* Only ilk+ has port A */
        if (dport->port == PORT_A) {
                ironlake_set_pll_cpu_edp(intel_dp);
                ironlake_edp_pll_on(intel_dp);
        }
}
2538
/*
 * Logically disconnect the power sequencer currently assigned to this
 * eDP port: sync off vdd, clear the sequencer's port select, and mark
 * the port as having no sequencer (INVALID_PIPE).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        /* make sure vdd is really off before clearing the port select */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2564
/*
 * Take the power sequencer of the given pipe away from whichever eDP
 * port currently has it assigned, so the caller can claim it. Warns
 * if the sequencer is stolen from a still-active port.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* only pipes A and B have power sequencers */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                WARN(encoder->connectors_active,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2601
/*
 * Assign the power sequencer of the current crtc's pipe to this eDP
 * port: detach any sequencer this port previously used, steal the
 * target pipe's sequencer from any other port, then claim it and
 * initialize its registers. No-op for non-eDP or when already
 * correctly assigned.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2642
/*
 * VLV pre-enable hook: program per-lane PCS registers via DPIO, then
 * run the common DP enable path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        /*
         * NOTE(review): the value read above is immediately discarded
         * by the assignment below, so the read looks dead unless it
         * has a hardware side effect -- confirm and drop if not.
         */
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
2671
/*
 * VLV: prepare the port and reset the Tx lanes via DPIO before the
 * port PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
}
2701
/*
 * CHV pre-enable hook: take the Tx lanes out of reset and program lane
 * latency settings via DPIO, then run the common DP enable path.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i;
        u32 val;

        mutex_lock(&dev_priv->dpio_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < 4; i++) {
                /* Set the upar bit (cleared only for lane 1) */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* FIXME: Fix up value only after power analysis */

        mutex_unlock(&dev_priv->dpio_lock);

        intel_enable_dp(encoder);
}
2758
/*
 * CHV: program the PHY's left/right clock distribution and clock
 * channel usage before the port PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->dpio_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2824
2825 /*
2826  * Native read with retry for link status and receiver capability reads for
2827  * cases where the sink may still be asleep.
2828  *
2829  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2830  * supposed to retry 3 times per the spec.
2831  */
2832 static ssize_t
2833 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2834                         void *buffer, size_t size)
2835 {
2836         ssize_t ret;
2837         int i;
2838
2839         /*
2840          * Sometime we just get the same incorrect byte repeated
2841          * over the entire buffer. Doing just one throw away read
2842          * initially seems to "solve" it.
2843          */
2844         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2845
2846         for (i = 0; i < 3; i++) {
2847                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2848                 if (ret == size)
2849                         return ret;
2850                 msleep(1);
2851         }
2852
2853         return ret;
2854 }
2855
2856 /*
2857  * Fetch AUX CH registers 0x202 - 0x207 which contain
2858  * link status information
2859  */
2860 static bool
2861 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2862 {
2863         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2864                                        DP_LANE0_1_STATUS,
2865                                        link_status,
2866                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2867 }
2868
2869 /* These are source-specific values. */
2870 static uint8_t
2871 intel_dp_voltage_max(struct intel_dp *intel_dp)
2872 {
2873         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2874         struct drm_i915_private *dev_priv = dev->dev_private;
2875         enum port port = dp_to_dig_port(intel_dp)->port;
2876
2877         if (INTEL_INFO(dev)->gen >= 9) {
2878                 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2879                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2880                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2881         } else if (IS_VALLEYVIEW(dev))
2882                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2883         else if (IS_GEN7(dev) && port == PORT_A)
2884                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2885         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2886                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2887         else
2888                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2889 }
2890
/*
 * Maximum pre-emphasis level this source supports for the given
 * voltage swing, per platform.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                /* gen7 CPU eDP */
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                /* everything else */
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
2958
/*
 * Translate the DP training levels in intel_dp->train_set[0] into VLV
 * DPIO demph/preemph/uniqtranscale register values and program the
 * PHY. Always returns 0; unsupported swing/pre-emphasis combinations
 * return early without touching the PHY.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        /* Look up the register values for the requested levels. */
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /* Disable Tx, program the new values, then re-enable Tx. */
        mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
3058
3059 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3060 {
3061         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3062         struct drm_i915_private *dev_priv = dev->dev_private;
3063         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3064         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3065         u32 deemph_reg_value, margin_reg_value, val;
3066         uint8_t train_set = intel_dp->train_set[0];
3067         enum dpio_channel ch = vlv_dport_to_channel(dport);
3068         enum pipe pipe = intel_crtc->pipe;
3069         int i;
3070
3071         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3072         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3073                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3074                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3075                         deemph_reg_value = 128;
3076                         margin_reg_value = 52;
3077                         break;
3078                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079                         deemph_reg_value = 128;
3080                         margin_reg_value = 77;
3081                         break;
3082                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3083                         deemph_reg_value = 128;
3084                         margin_reg_value = 102;
3085                         break;
3086                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3087                         deemph_reg_value = 128;
3088                         margin_reg_value = 154;
3089                         /* FIXME extra to set for 1200 */
3090                         break;
3091                 default:
3092                         return 0;
3093                 }
3094                 break;
3095         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3096                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3097                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3098                         deemph_reg_value = 85;
3099                         margin_reg_value = 78;
3100                         break;
3101                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3102                         deemph_reg_value = 85;
3103                         margin_reg_value = 116;
3104                         break;
3105                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3106                         deemph_reg_value = 85;
3107                         margin_reg_value = 154;
3108                         break;
3109                 default:
3110                         return 0;
3111                 }
3112                 break;
3113         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3114                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3115                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116                         deemph_reg_value = 64;
3117                         margin_reg_value = 104;
3118                         break;
3119                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3120                         deemph_reg_value = 64;
3121                         margin_reg_value = 154;
3122                         break;
3123                 default:
3124                         return 0;
3125                 }
3126                 break;
3127         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3128                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3129                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3130                         deemph_reg_value = 43;
3131                         margin_reg_value = 154;
3132                         break;
3133                 default:
3134                         return 0;
3135                 }
3136                 break;
3137         default:
3138                 return 0;
3139         }
3140
3141         mutex_lock(&dev_priv->dpio_lock);
3142
3143         /* Clear calc init */
3144         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3145         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3146         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3147         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3148         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3149
3150         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3151         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3152         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3153         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3154         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3155
3156         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3157         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3158         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3159         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3160
3161         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3162         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3165
3166         /* Program swing deemph */
3167         for (i = 0; i < 4; i++) {
3168                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3169                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3170                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3171                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3172         }
3173
3174         /* Program swing margin */
3175         for (i = 0; i < 4; i++) {
3176                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3177                 val &= ~DPIO_SWING_MARGIN000_MASK;
3178                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3179                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3180         }
3181
3182         /* Disable unique transition scale */
3183         for (i = 0; i < 4; i++) {
3184                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3185                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3186                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3187         }
3188
3189         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3190                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3191                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3192                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3193
3194                 /*
3195                  * The document said it needs to set bit 27 for ch0 and bit 26
3196                  * for ch1. Might be a typo in the doc.
3197                  * For now, for this unique transition scale selection, set bit
3198                  * 27 for ch0 and ch1.
3199                  */
3200                 for (i = 0; i < 4; i++) {
3201                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3202                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3203                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3204                 }
3205
3206                 for (i = 0; i < 4; i++) {
3207                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3208                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3210                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3211                 }
3212         }
3213
3214         /* Start swing calculation */
3215         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3216         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3217         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3218
3219         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3220         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3222
3223         /* LRC Bypass */
3224         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3225         val |= DPIO_LRC_BYPASS;
3226         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3227
3228         mutex_unlock(&dev_priv->dpio_lock);
3229
3230         return 0;
3231 }
3232
3233 static void
3234 intel_get_adjust_train(struct intel_dp *intel_dp,
3235                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3236 {
3237         uint8_t v = 0;
3238         uint8_t p = 0;
3239         int lane;
3240         uint8_t voltage_max;
3241         uint8_t preemph_max;
3242
3243         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3244                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3245                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3246
3247                 if (this_v > v)
3248                         v = this_v;
3249                 if (this_p > p)
3250                         p = this_p;
3251         }
3252
3253         voltage_max = intel_dp_voltage_max(intel_dp);
3254         if (v >= voltage_max)
3255                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3256
3257         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3258         if (p >= preemph_max)
3259                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3260
3261         for (lane = 0; lane < 4; lane++)
3262                 intel_dp->train_set[lane] = v | p;
3263 }
3264
3265 static uint32_t
3266 intel_gen4_signal_levels(uint8_t train_set)
3267 {
3268         uint32_t        signal_levels = 0;
3269
3270         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3271         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3272         default:
3273                 signal_levels |= DP_VOLTAGE_0_4;
3274                 break;
3275         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3276                 signal_levels |= DP_VOLTAGE_0_6;
3277                 break;
3278         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3279                 signal_levels |= DP_VOLTAGE_0_8;
3280                 break;
3281         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3282                 signal_levels |= DP_VOLTAGE_1_2;
3283                 break;
3284         }
3285         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3286         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3287         default:
3288                 signal_levels |= DP_PRE_EMPHASIS_0;
3289                 break;
3290         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3291                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3292                 break;
3293         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3294                 signal_levels |= DP_PRE_EMPHASIS_6;
3295                 break;
3296         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3297                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3298                 break;
3299         }
3300         return signal_levels;
3301 }
3302
3303 /* Gen6's DP voltage swing and pre-emphasis control */
3304 static uint32_t
3305 intel_gen6_edp_signal_levels(uint8_t train_set)
3306 {
3307         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3308                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3309         switch (signal_levels) {
3310         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3311         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3312                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3313         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3314                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3315         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3317                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3318         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3320                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3321         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3322         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3323                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3324         default:
3325                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3326                               "0x%x\n", signal_levels);
3327                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3328         }
3329 }
3330
3331 /* Gen7's DP voltage swing and pre-emphasis control */
3332 static uint32_t
3333 intel_gen7_edp_signal_levels(uint8_t train_set)
3334 {
3335         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3336                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3337         switch (signal_levels) {
3338         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3339                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3340         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3341                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3342         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3343                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3344
3345         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3346                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3347         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3348                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3349
3350         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3351                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3352         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3353                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3354
3355         default:
3356                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3357                               "0x%x\n", signal_levels);
3358                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3359         }
3360 }
3361
3362 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3363 static uint32_t
3364 intel_hsw_signal_levels(uint8_t train_set)
3365 {
3366         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3367                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3368         switch (signal_levels) {
3369         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3370                 return DDI_BUF_TRANS_SELECT(0);
3371         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3372                 return DDI_BUF_TRANS_SELECT(1);
3373         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3374                 return DDI_BUF_TRANS_SELECT(2);
3375         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3376                 return DDI_BUF_TRANS_SELECT(3);
3377
3378         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3379                 return DDI_BUF_TRANS_SELECT(4);
3380         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3381                 return DDI_BUF_TRANS_SELECT(5);
3382         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3383                 return DDI_BUF_TRANS_SELECT(6);
3384
3385         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3386                 return DDI_BUF_TRANS_SELECT(7);
3387         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3388                 return DDI_BUF_TRANS_SELECT(8);
3389
3390         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391                 return DDI_BUF_TRANS_SELECT(9);
3392         default:
3393                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3394                               "0x%x\n", signal_levels);
3395                 return DDI_BUF_TRANS_SELECT(0);
3396         }
3397 }
3398
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* All lanes get the same drive settings; lane 0 is representative. */
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/*
		 * CHV/VLV program drive settings via DPIO inside the
		 * helpers, so nothing is merged into the port register
		 * here (mask = 0).
		 * NOTE(review): CHV is deliberately checked before VLV --
		 * presumably IS_VALLEYVIEW() also matches CHV; confirm
		 * before reordering these branches.
		 */
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* Port A eDP on gen6/gen7 has its own level encodings. */
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* Replace only the level bits in the caller's port register value. */
	*DP = (*DP & ~mask) | signal_levels;
}
3433
3434 static bool
3435 intel_dp_set_link_train(struct intel_dp *intel_dp,
3436                         uint32_t *DP,
3437                         uint8_t dp_train_pat)
3438 {
3439         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440         struct drm_device *dev = intel_dig_port->base.base.dev;
3441         struct drm_i915_private *dev_priv = dev->dev_private;
3442         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3443         int ret, len;
3444
3445         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3446
3447         I915_WRITE(intel_dp->output_reg, *DP);
3448         POSTING_READ(intel_dp->output_reg);
3449
3450         buf[0] = dp_train_pat;
3451         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3452             DP_TRAINING_PATTERN_DISABLE) {
3453                 /* don't write DP_TRAINING_LANEx_SET on disable */
3454                 len = 1;
3455         } else {
3456                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3457                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3458                 len = intel_dp->lane_count + 1;
3459         }
3460
3461         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3462                                 buf, len);
3463
3464         return ret == len;
3465 }
3466
/*
 * Restart link training from the lowest drive settings: clear the cached
 * per-lane values, recompute the source signal levels from them, then
 * program the requested training pattern on source and sink.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3475
3476 static bool
3477 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3478                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3479 {
3480         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3481         struct drm_device *dev = intel_dig_port->base.base.dev;
3482         struct drm_i915_private *dev_priv = dev->dev_private;
3483         int ret;
3484
3485         intel_get_adjust_train(intel_dp, link_status);
3486         intel_dp_set_signal_levels(intel_dp, DP);
3487
3488         I915_WRITE(intel_dp->output_reg, *DP);
3489         POSTING_READ(intel_dp->output_reg);
3490
3491         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3492                                 intel_dp->train_set, intel_dp->lane_count);
3493
3494         return ret == intel_dp->lane_count;
3495 }
3496
/* Switch a DDI port to idle-pattern transmission after link training. */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	/* Only DDI platforms have a DP_TP_CTL idle-training mode. */
	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* For other ports, wait (up to 1ms) for the idle pattern to latch. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3527
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks with a rate table also get the rate index programmed. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery: start from zeroed drive settings (TP1) */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff; /* sentinel: never matches a real swing value */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from scratch, up to 5 full retries. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Preserve the final port register value for later stages. */
	intel_dp->DP = DP;
}
3621
/*
 * Second phase of DP link training: channel equalization. Assumes
 * intel_dp_start_link_train() already achieved clock recovery; falls
 * back to it if clock recovery is lost along the way.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* cr_tries counts full clock-recovery restarts; cap at 5. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3701
/* Tell both the source and the sink to stop emitting training patterns. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3707
/*
 * Disable a (non-DDI) DP port, stepping through idle-pattern state and
 * applying the IBX transcoder-select hardware workaround on the way down.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere; reaching here is a bug. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Drop to idle-pattern transmission before disabling the port. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	/* Give the panel time to power down before it may be re-enabled. */
	msleep(intel_dp->panel_power_down_delay);
}
3757
/*
 * Read and cache the sink's DPCD receiver capabilities, plus PSR, TPS3,
 * eDP 1.4 link-rate-table and downstream-port information derived from
 * them. Returns false if the sink cannot be probed.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		/*
		 * NOTE(review): the read's return value is ignored; the
		 * memset above keeps psr_dpcd zeroed (no PSR) on failure.
		 */
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		/*
		 * NOTE(review): return value ignored here too; on AUX
		 * failure sink_rates is uninitialized and the zero-
		 * terminator scan below may read garbage -- confirm
		 * whether this is acceptable.
		 */
		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Table is zero-terminated; collect entries until then. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3837
3838 static void
3839 intel_dp_probe_oui(struct intel_dp *intel_dp)
3840 {
3841         u8 buf[3];
3842
3843         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3844                 return;
3845
3846         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3847                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3848                               buf[0], buf[1], buf[2]);
3849
3850         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3851                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3852                               buf[0], buf[1], buf[2]);
3853 }
3854
3855 static bool
3856 intel_dp_probe_mst(struct intel_dp *intel_dp)
3857 {
3858         u8 buf[1];
3859
3860         if (!intel_dp->can_mst)
3861                 return false;
3862
3863         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3864                 return false;
3865
3866         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3867                 if (buf[0] & DP_MST_CAP) {
3868                         DRM_DEBUG_KMS("Sink is MST capable\n");
3869                         intel_dp->is_mst = true;
3870                 } else {
3871                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3872                         intel_dp->is_mst = false;
3873                 }
3874         }
3875
3876         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3877         return intel_dp->is_mst;
3878 }
3879
/*
 * Read a sink-computed frame CRC over the DP AUX channel.
 *
 * Enables the sink's CRC test mode (DP_TEST_SINK_START), waits up to six
 * vblanks for DP_TEST_COUNT in DP_TEST_SINK_MISC to advance past its
 * starting value, reads the six CRC bytes from DP_TEST_CRC_R_CR into
 * @crc, then disables test mode again.
 *
 * Returns 0 on success, -ENOTTY when the sink does not advertise
 * DP_TEST_CRC_SUPPORTED, -ETIMEDOUT when the CRC count never changed,
 * or -EIO on any DPCD transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;

	/* Bail out early if the sink cannot compute CRCs at all. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Set the CRC-test bit with read-modify-write to keep other bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0)
		return -EIO;

	/* Remember the current CRC count so a fresh CRC can be detected. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Poll once per vblank until the sink reports a new CRC. */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		return -ETIMEDOUT;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EIO;

	/* Turn CRC test mode back off, again preserving neighbouring bits. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0)
		return -EIO;

	return 0;
}
3930
3931 static bool
3932 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3933 {
3934         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3935                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3936                                        sink_irq_vector, 1) == 1;
3937 }
3938
3939 static bool
3940 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3941 {
3942         int ret;
3943
3944         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3945                                              DP_SINK_COUNT_ESI,
3946                                              sink_irq_vector, 14);
3947         if (ret != 14)
3948                 return false;
3949
3950         return true;
3951 }
3952
/*
 * Respond to a DP automated test request from the sink.
 *
 * Automated testing is not implemented, so every request is answered
 * with DP_TEST_NAK to tell the sink it will not be executed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3959
/*
 * Service a short HPD pulse while in MST mode.
 *
 * Reads the ESI (Event Status Indicator) block, retrains the link if
 * channel equalization was lost, forwards events to the MST topology
 * manager and acks handled events back to the sink, looping while the
 * sink keeps raising new events.
 *
 * Returns the topology manager's result for the last handled event, 0
 * for an unhandled event, or -EINVAL when not in MST mode or when the
 * ESI read failed (in which case MST mode is torn down and a hotplug
 * event is generated).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the handled event; retry the AUX write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile; re-read and loop. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4016
4017 /*
4018  * According to DP spec
4019  * 5.1.2:
4020  *  1. Read DPCD
4021  *  2. Configure link according to Receiver Capabilities
4022  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4023  *  4. Check link status on receipt of hot-plug interrupt
4024  */
4025 static void
4026 intel_dp_check_link_status(struct intel_dp *intel_dp)
4027 {
4028         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4029         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4030         u8 sink_irq_vector;
4031         u8 link_status[DP_LINK_STATUS_SIZE];
4032
4033         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4034
4035         if (!intel_encoder->connectors_active)
4036                 return;
4037
4038         if (WARN_ON(!intel_encoder->base.crtc))
4039                 return;
4040
4041         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4042                 return;
4043
4044         /* Try to read receiver status if the link appears to be up */
4045         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4046                 return;
4047         }
4048
4049         /* Now read the DPCD to see if it's actually running */
4050         if (!intel_dp_get_dpcd(intel_dp)) {
4051                 return;
4052         }
4053
4054         /* Try to read the source of the interrupt */
4055         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4056             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4057                 /* Clear interrupt source */
4058                 drm_dp_dpcd_writeb(&intel_dp->aux,
4059                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4060                                    sink_irq_vector);
4061
4062                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4063                         intel_dp_handle_test_request(intel_dp);
4064                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4065                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4066         }
4067
4068         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4069                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4070                               intel_encoder->base.name);
4071                 intel_dp_start_link_train(intel_dp);
4072                 intel_dp_complete_link_train(intel_dp);
4073                 intel_dp_stop_link_train(intel_dp);
4074         }
4075 }
4076
4077 /* XXX this is probably wrong for multiple downstream ports */
4078 static enum drm_connector_status
4079 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4080 {
4081         uint8_t *dpcd = intel_dp->dpcd;
4082         uint8_t type;
4083
4084         if (!intel_dp_get_dpcd(intel_dp))
4085                 return connector_status_disconnected;
4086
4087         /* if there's no downstream port, we're done */
4088         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4089                 return connector_status_connected;
4090
4091         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4092         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4093             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4094                 uint8_t reg;
4095
4096                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4097                                             &reg, 1) < 0)
4098                         return connector_status_unknown;
4099
4100                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4101                                               : connector_status_disconnected;
4102         }
4103
4104         /* If no HPD, poke DDC gently */
4105         if (drm_probe_ddc(&intel_dp->aux.ddc))
4106                 return connector_status_connected;
4107
4108         /* Well we tried, say unknown for unreliable port types */
4109         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4110                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4111                 if (type == DP_DS_PORT_TYPE_VGA ||
4112                     type == DP_DS_PORT_TYPE_NON_EDID)
4113                         return connector_status_unknown;
4114         } else {
4115                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4116                         DP_DWN_STRM_PORT_TYPE_MASK;
4117                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4118                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4119                         return connector_status_unknown;
4120         }
4121
4122         /* Anything else is out of spec, warn and ignore */
4123         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4124         return connector_status_disconnected;
4125 }
4126
4127 static enum drm_connector_status
4128 edp_detect(struct intel_dp *intel_dp)
4129 {
4130         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4131         enum drm_connector_status status;
4132
4133         status = intel_panel_detect(dev);
4134         if (status == connector_status_unknown)
4135                 status = connector_status_connected;
4136
4137         return status;
4138 }
4139
4140 static enum drm_connector_status
4141 ironlake_dp_detect(struct intel_dp *intel_dp)
4142 {
4143         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4144         struct drm_i915_private *dev_priv = dev->dev_private;
4145         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4146
4147         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4148                 return connector_status_disconnected;
4149
4150         return intel_dp_detect_dpcd(intel_dp);
4151 }
4152
4153 static int g4x_digital_port_connected(struct drm_device *dev,
4154                                        struct intel_digital_port *intel_dig_port)
4155 {
4156         struct drm_i915_private *dev_priv = dev->dev_private;
4157         uint32_t bit;
4158
4159         if (IS_VALLEYVIEW(dev)) {
4160                 switch (intel_dig_port->port) {
4161                 case PORT_B:
4162                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4163                         break;
4164                 case PORT_C:
4165                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4166                         break;
4167                 case PORT_D:
4168                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4169                         break;
4170                 default:
4171                         return -EINVAL;
4172                 }
4173         } else {
4174                 switch (intel_dig_port->port) {
4175                 case PORT_B:
4176                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4177                         break;
4178                 case PORT_C:
4179                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4180                         break;
4181                 case PORT_D:
4182                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4183                         break;
4184                 default:
4185                         return -EINVAL;
4186                 }
4187         }
4188
4189         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4190                 return 0;
4191         return 1;
4192 }
4193
4194 static enum drm_connector_status
4195 g4x_dp_detect(struct intel_dp *intel_dp)
4196 {
4197         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4198         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4199         int ret;
4200
4201         /* Can't disconnect eDP, but you can close the lid... */
4202         if (is_edp(intel_dp)) {
4203                 enum drm_connector_status status;
4204
4205                 status = intel_panel_detect(dev);
4206                 if (status == connector_status_unknown)
4207                         status = connector_status_connected;
4208                 return status;
4209         }
4210
4211         ret = g4x_digital_port_connected(dev, intel_dig_port);
4212         if (ret == -EINVAL)
4213                 return connector_status_unknown;
4214         else if (ret == 0)
4215                 return connector_status_disconnected;
4216
4217         return intel_dp_detect_dpcd(intel_dp);
4218 }
4219
4220 static struct edid *
4221 intel_dp_get_edid(struct intel_dp *intel_dp)
4222 {
4223         struct intel_connector *intel_connector = intel_dp->attached_connector;
4224
4225         /* use cached edid if we have one */
4226         if (intel_connector->edid) {
4227                 /* invalid edid */
4228                 if (IS_ERR(intel_connector->edid))
4229                         return NULL;
4230
4231                 return drm_edid_duplicate(intel_connector->edid);
4232         } else
4233                 return drm_get_edid(&intel_connector->base,
4234                                     &intel_dp->aux.ddc);
4235 }
4236
4237 static void
4238 intel_dp_set_edid(struct intel_dp *intel_dp)
4239 {
4240         struct intel_connector *intel_connector = intel_dp->attached_connector;
4241         struct edid *edid;
4242
4243         edid = intel_dp_get_edid(intel_dp);
4244         intel_connector->detect_edid = edid;
4245
4246         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4247                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4248         else
4249                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4250 }
4251
4252 static void
4253 intel_dp_unset_edid(struct intel_dp *intel_dp)
4254 {
4255         struct intel_connector *intel_connector = intel_dp->attached_connector;
4256
4257         kfree(intel_connector->detect_edid);
4258         intel_connector->detect_edid = NULL;
4259
4260         intel_dp->has_audio = false;
4261 }
4262
4263 static enum intel_display_power_domain
4264 intel_dp_power_get(struct intel_dp *dp)
4265 {
4266         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4267         enum intel_display_power_domain power_domain;
4268
4269         power_domain = intel_display_port_power_domain(encoder);
4270         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4271
4272         return power_domain;
4273 }
4274
4275 static void
4276 intel_dp_power_put(struct intel_dp *dp,
4277                    enum intel_display_power_domain power_domain)
4278 {
4279         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4280         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4281 }
4282
/*
 * drm_connector_funcs.detect() for DP/eDP connectors.
 *
 * Determines whether a sink is present, probes OUI and MST capability,
 * and caches a fresh EDID for connected SST sinks.  MST-active ports
 * always report disconnected from the single-monitor point of view.
 * Talking to the sink requires the port's power domain, held between
 * intel_dp_power_get()/put() around the probing section.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any EDID cached by a previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	/* Drop the power reference on every exit path. */
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4339
4340 static void
4341 intel_dp_force(struct drm_connector *connector)
4342 {
4343         struct intel_dp *intel_dp = intel_attached_dp(connector);
4344         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4345         enum intel_display_power_domain power_domain;
4346
4347         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4348                       connector->base.id, connector->name);
4349         intel_dp_unset_edid(intel_dp);
4350
4351         if (connector->status != connector_status_connected)
4352                 return;
4353
4354         power_domain = intel_dp_power_get(intel_dp);
4355
4356         intel_dp_set_edid(intel_dp);
4357
4358         intel_dp_power_put(intel_dp, power_domain);
4359
4360         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4361                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4362 }
4363
4364 static int intel_dp_get_modes(struct drm_connector *connector)
4365 {
4366         struct intel_connector *intel_connector = to_intel_connector(connector);
4367         struct edid *edid;
4368
4369         edid = intel_connector->detect_edid;
4370         if (edid) {
4371                 int ret = intel_connector_update_modes(connector, edid);
4372                 if (ret)
4373                         return ret;
4374         }
4375
4376         /* if eDP has no EDID, fall back to fixed mode */
4377         if (is_edp(intel_attached_dp(connector)) &&
4378             intel_connector->panel.fixed_mode) {
4379                 struct drm_display_mode *mode;
4380
4381                 mode = drm_mode_duplicate(connector->dev,
4382                                           intel_connector->panel.fixed_mode);
4383                 if (mode) {
4384                         drm_mode_probed_add(connector, mode);
4385                         return 1;
4386                 }
4387         }
4388
4389         return 0;
4390 }
4391
4392 static bool
4393 intel_dp_detect_audio(struct drm_connector *connector)
4394 {
4395         bool has_audio = false;
4396         struct edid *edid;
4397
4398         edid = to_intel_connector(connector)->detect_edid;
4399         if (edid)
4400                 has_audio = drm_detect_monitor_audio(edid);
4401
4402         return has_audio;
4403 }
4404
/*
 * drm_connector_funcs.set_property(): handle the force-audio,
 * broadcast-RGB and (eDP only) scaling-mode properties.
 *
 * Property value changes that affect the output trigger a modeset
 * restore via intel_crtc_restore_mode(); unchanged values return early.
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the property object itself first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio capability from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new configuration if the connector is driving a crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4492
4493 static void
4494 intel_dp_connector_destroy(struct drm_connector *connector)
4495 {
4496         struct intel_connector *intel_connector = to_intel_connector(connector);
4497
4498         kfree(intel_connector->detect_edid);
4499
4500         if (!IS_ERR_OR_NULL(intel_connector->edid))
4501                 kfree(intel_connector->edid);
4502
4503         /* Can't call is_edp() since the encoder may have been destroyed
4504          * already. */
4505         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4506                 intel_panel_fini(&intel_connector->panel);
4507
4508         drm_connector_cleanup(connector);
4509         kfree(connector);
4510 }
4511
/*
 * Tear down a DP encoder: unregister the AUX channel and MST state,
 * force panel VDD off (eDP) and drop the reboot notifier before
 * freeing the digital port.  Ordering matters: the delayed VDD-off
 * work must be cancelled before syncing VDD off under the pps lock.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		/* Unhook the reboot notifier if one was registered. */
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4537
/*
 * Suspend hook: make sure eDP panel VDD is really off before the
 * system goes down, cancelling the delayed VDD-off work first so it
 * cannot race with the synchronous turn-off below.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	/* Only eDP has panel power sequencing to worry about. */
	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled do to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4554
/*
 * Reconcile our VDD state tracking with hardware at boot/resume: if the
 * BIOS left panel VDD on, take the matching power-domain reference and
 * schedule the normal delayed VDD off.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to fix up if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4579
/*
 * drm_encoder_funcs.reset(): re-sync eDP power-sequencer state with
 * whatever the BIOS left behind.  Non-eDP encoders need nothing here.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	/* All power-sequencer state is protected by the pps lock. */
	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4602
/* Connector ops shared by all DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4614
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4620
/* Encoder lifetime ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4625
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	/*
	 * Intentionally empty: DP hotplug work is done elsewhere
	 * (presumably via intel_dp_hpd_pulse() — this callback only
	 * exists to satisfy the encoder interface).
	 */
}
4631
/*
 * Handle a hotplug (HPD) interrupt pulse on a DP/eDP digital port.
 * Long pulses re-verify the physical connection and re-read the sink's
 * capabilities (DPCD, OUI, MST support); short pulses service MST
 * sideband interrupts, or re-check link status for SST sinks.  If an
 * MST device appears to have vanished, MST mode is torn down.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Reset any non-eDP port back to plain DP before (re)probing. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Keep the port powered while we talk to the sink. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Confirm something is actually connected before probing. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		/* Short pulse: service MST IRQs if in MST mode. */
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4715
4716 /* Return which DP Port should be selected for Transcoder DP control */
4717 int
4718 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4719 {
4720         struct drm_device *dev = crtc->dev;
4721         struct intel_encoder *intel_encoder;
4722         struct intel_dp *intel_dp;
4723
4724         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4725                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4726
4727                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4728                     intel_encoder->type == INTEL_OUTPUT_EDP)
4729                         return intel_dp->output_reg;
4730         }
4731
4732         return -1;
4733 }
4734
4735 /* check the VBT to see whether the eDP is on DP-D port */
4736 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4737 {
4738         struct drm_i915_private *dev_priv = dev->dev_private;
4739         union child_device_config *p_child;
4740         int i;
4741         static const short port_mapping[] = {
4742                 [PORT_B] = PORT_IDPB,
4743                 [PORT_C] = PORT_IDPC,
4744                 [PORT_D] = PORT_IDPD,
4745         };
4746
4747         if (port == PORT_A)
4748                 return true;
4749
4750         if (!dev_priv->vbt.child_dev_num)
4751                 return false;
4752
4753         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4754                 p_child = dev_priv->vbt.child_dev + i;
4755
4756                 if (p_child->common.dvo_port == port_mapping[port] &&
4757                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4758                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4759                         return true;
4760         }
4761         return false;
4762 }
4763
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) and, for eDP panels, a scaling mode property defaulting to
 * aspect-preserving panel fitting.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	/* eDP panels additionally get panel fitter control. */
	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
4782
/*
 * Seed the panel power sequencing timestamps with "now" so the delay
 * bookkeeping starts from a known state.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
4789
/*
 * Compute the eDP panel power sequencing delays (T1-T12) once per
 * panel: read the current values from the PPS registers and the VBT,
 * take the max of the two for each delay (falling back to the eDP 1.3
 * spec limits when both are zero), and cache the result in
 * intel_dp->pps_delays plus the derived software delays (in ms).
 * Caller must hold dev_priv->pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register set. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* The cycle delay field is in 100ms units; scale to 100us units
	 * to match the other delays. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hw units to ms for the software delays. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4892
/*
 * Program the panel power sequencer registers from the delays cached
 * in intel_dp->pps_delays, including the reference clock divisor, the
 * power cycle delay, and (on platforms that have them) the port select
 * bits.  Caller must hold dev_priv->pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register set. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
4958
4959 /**
4960  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4961  * @dev: DRM device
4962  * @refresh_rate: RR to be programmed
4963  *
4964  * This function gets called when refresh rate (RR) has to be changed from
4965  * one frequency to another. Switches can be between high and low RR
4966  * supported by the panel or to any other RR based on media playback (in
4967  * this case, RR value needs to be passed from user space).
4968  *
4969  * The caller of this function needs to take a lock on dev_priv->drrs.
4970  */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate means going to low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/*
	 * Gen8+ (except CHV) switches RR by selecting between the M1/N1
	 * and M2/N2 link parameter sets; earlier gens (and CHV) toggle
	 * the EDP RR mode switch bit in PIPECONF instead.
	 */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5062
5063 /**
5064  * intel_edp_drrs_enable - init drrs struct if supported
5065  * @intel_dp: DP struct
5066  *
5067  * Initializes frontbuffer_bits and drrs.dp
5068  */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one eDP panel can own DRRS at a time. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5095
5096 /**
5097  * intel_edp_drrs_disable - Disable DRRS
5098  * @intel_dp: DP struct
5099  *
5100  */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Restore the high (fixed mode) refresh rate before letting go. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Cancel outside the mutex; the work itself takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5128
/*
 * Delayed work that switches the panel down to its low refresh rate
 * once the screen has been idle (scheduled from intel_edp_drrs_flush()
 * after the idle timeout).
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5158
5159 /**
5160  * intel_edp_drrs_invalidate - Invalidate DRRS
5161  * @dev: DRM device
5162  * @frontbuffer_bits: frontbuffer plane tracking bits
5163  *
5164  * When there is a disturbance on screen (due to cursor movement/time
5165  * update etc), DRRS needs to be invalidated, i.e. need to switch to
5166  * high RR.
5167  *
5168  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5169  */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Screen is busy again; stop any pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Bump back to the high (fixed mode) refresh rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only track dirty bits belonging to the DRRS panel's pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5202
5203 /**
5204  * intel_edp_drrs_flush - Flush DRRS
5205  * @dev: DRM device
5206  * @frontbuffer_bits: frontbuffer plane tracking bits
5207  *
5208  * When there is no movement on screen, DRRS work can be scheduled.
5209  * This DRRS work is responsible for setting relevant registers after a
5210  * timeout of 1 second.
5211  *
5212  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5213  */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idle timer from scratch. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* Schedule the downclock only once all tracked buffers are idle. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
			!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5242
5243 /**
5244  * DOC: Display Refresh Rate Switching (DRRS)
5245  *
5246  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5248  * dynamically, based on the usage scenario. This feature is applicable
5249  * for internal panels.
5250  *
5251  * Indication that the panel supports DRRS is given by the panel EDID, which
5252  * would list multiple refresh rates for one resolution.
5253  *
5254  * DRRS is of 2 types - static and seamless.
5255  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5256  * (may appear as a blink on screen) and is used in dock-undock scenario.
5257  * Seamless DRRS involves changing RR without any visual effect to the user
5258  * and can be used during normal system usage. This is done by programming
5259  * certain registers.
5260  *
5261  * Support for static/seamless DRRS may be indicated in the VBT based on
5262  * inputs from the panel spec.
5263  *
5264  * DRRS saves power by switching to low RR based on usage scenarios.
5265  *
5266  * eDP DRRS:-
5267  *        The implementation is based on frontbuffer tracking implementation.
5268  * When there is a disturbance on the screen triggered by user activity or a
5269  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5270  * When there is no movement on screen, after a timeout of 1 second, a switch
5271  * to low RR is made.
5272  *        For integration with frontbuffer tracking code,
5273  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5274  *
5275  * DRRS can be further extended to support other internal panels and also
5276  * the scenario of video playback wherein RR is set based on the rate
5277  * requested by userspace.
5278  */
5279
5280 /**
5281  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5282  * @intel_connector: eDP connector
5283  * @fixed_mode: preferred mode of panel
5284  *
 * This function is called only once at driver load to initialize basic
5286  * DRRS stuff.
5287  *
5288  * Returns:
5289  * Downclock mode if panel supports it, else return NULL.
5290  * DRRS support is determined by the presence of downclock mode (apart
5291  * from VBT setting).
5292  */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up even if DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A second, lower refresh rate mode in the EDID enables DRRS. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5329
/*
 * eDP-specific connector setup: sanitize any left-over VDD state, cache
 * the sink's DPCD and EDID, pick the fixed panel mode (EDID preferred,
 * VBT fallback), set up DRRS, and initialize the panel and backlight.
 * Returns false if the panel looks like a ghost (DPCD read fails);
 * returns true immediately for non-eDP connectors.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read but unusable; remember the error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5436
/*
 * Set up the DRM connector for a DP/eDP port: select the per-platform AUX
 * vfuncs, register the connector, assign the hotplug pin, initialize the
 * panel power sequencer for eDP, and init MST where supported.
 *
 * Returns true on success.  On failure the connector and AUX channel are
 * torn down again and false is returned; the caller owns freeing the
 * encoder/connector allocations.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	/* No power sequencer assigned yet; one is picked later for eDP. */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: AUX clock divider varies per platform generation */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	/* gen9+ uses a different AUX send-control register layout */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Delayed work so vdd can stay on briefly after the last AUX access */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	/* DDI platforms read connector state through the DDI-specific path */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		/* DP is never initialized on any other port */
		BUG();
	}

	/*
	 * eDP needs the panel power sequencer configured before the first
	 * AUX transaction; vlv/chv pick a PPS instance per pipe.
	 */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
5580
5581 void
5582 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5583 {
5584         struct drm_i915_private *dev_priv = dev->dev_private;
5585         struct intel_digital_port *intel_dig_port;
5586         struct intel_encoder *intel_encoder;
5587         struct drm_encoder *encoder;
5588         struct intel_connector *intel_connector;
5589
5590         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5591         if (!intel_dig_port)
5592                 return;
5593
5594         intel_connector = intel_connector_alloc();
5595         if (!intel_connector) {
5596                 kfree(intel_dig_port);
5597                 return;
5598         }
5599
5600         intel_encoder = &intel_dig_port->base;
5601         encoder = &intel_encoder->base;
5602
5603         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5604                          DRM_MODE_ENCODER_TMDS);
5605
5606         intel_encoder->compute_config = intel_dp_compute_config;
5607         intel_encoder->disable = intel_disable_dp;
5608         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5609         intel_encoder->get_config = intel_dp_get_config;
5610         intel_encoder->suspend = intel_dp_encoder_suspend;
5611         if (IS_CHERRYVIEW(dev)) {
5612                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5613                 intel_encoder->pre_enable = chv_pre_enable_dp;
5614                 intel_encoder->enable = vlv_enable_dp;
5615                 intel_encoder->post_disable = chv_post_disable_dp;
5616         } else if (IS_VALLEYVIEW(dev)) {
5617                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5618                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5619                 intel_encoder->enable = vlv_enable_dp;
5620                 intel_encoder->post_disable = vlv_post_disable_dp;
5621         } else {
5622                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5623                 intel_encoder->enable = g4x_enable_dp;
5624                 if (INTEL_INFO(dev)->gen >= 5)
5625                         intel_encoder->post_disable = ilk_post_disable_dp;
5626         }
5627
5628         intel_dig_port->port = port;
5629         intel_dig_port->dp.output_reg = output_reg;
5630
5631         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5632         if (IS_CHERRYVIEW(dev)) {
5633                 if (port == PORT_D)
5634                         intel_encoder->crtc_mask = 1 << 2;
5635                 else
5636                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5637         } else {
5638                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5639         }
5640         intel_encoder->cloneable = 0;
5641         intel_encoder->hot_plug = intel_dp_hot_plug;
5642
5643         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5644         dev_priv->hpd_irq_port[port] = intel_dig_port;
5645
5646         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5647                 drm_encoder_cleanup(encoder);
5648                 kfree(intel_dig_port);
5649                 kfree(intel_connector);
5650         }
5651 }
5652
5653 void intel_dp_mst_suspend(struct drm_device *dev)
5654 {
5655         struct drm_i915_private *dev_priv = dev->dev_private;
5656         int i;
5657
5658         /* disable MST */
5659         for (i = 0; i < I915_MAX_PORTS; i++) {
5660                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5661                 if (!intel_dig_port)
5662                         continue;
5663
5664                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5665                         if (!intel_dig_port->dp.can_mst)
5666                                 continue;
5667                         if (intel_dig_port->dp.is_mst)
5668                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5669                 }
5670         }
5671 }
5672
5673 void intel_dp_mst_resume(struct drm_device *dev)
5674 {
5675         struct drm_i915_private *dev_priv = dev->dev_private;
5676         int i;
5677
5678         for (i = 0; i < I915_MAX_PORTS; i++) {
5679                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5680                 if (!intel_dig_port)
5681                         continue;
5682                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5683                         int ret;
5684
5685                         if (!intel_dig_port->dp.can_mst)
5686                                 continue;
5687
5688                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5689                         if (ret != 0) {
5690                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5691                         }
5692                 }
5693         }
5694 }