drm/i915: Use DP_LINK_RATE_SET whenever possible
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
/* Pairing of a DP link-bandwidth code with the DPLL divider values needed
 * to produce that link clock on a given platform. */
struct dp_link_dpll {
        int link_bw;
        struct dpll dpll;
};

/* Gen4 DPLL dividers for the 1.62 and 2.7 GHz DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (IBX/CPT era) DPLL dividers for the same two link rates. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL dividers. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Skylake supports the following link rates (kHz) */
static const int gen9_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
/* Standard DP 1.2 link rates (kHz) used by all other platforms */
static const int default_rates[] = { 162000, 270000, 540000 };
91
92 /**
93  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94  * @intel_dp: DP struct
95  *
96  * If a CPU or PCH DP output is attached to an eDP panel, this function
97  * will return true, and false otherwise.
98  */
99 static bool is_edp(struct intel_dp *intel_dp)
100 {
101         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
104 }
105
106 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110         return intel_dig_port->base.base.dev;
111 }
112
113 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114 {
115         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
116 }
117
118 static void intel_dp_link_down(struct intel_dp *intel_dp);
119 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
120 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
121 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
122 static void vlv_steal_power_sequencer(struct drm_device *dev,
123                                       enum pipe pipe);
124
125 int
126 intel_dp_max_link_bw(struct intel_dp *intel_dp)
127 {
128         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
129
130         switch (max_link_bw) {
131         case DP_LINK_BW_1_62:
132         case DP_LINK_BW_2_7:
133         case DP_LINK_BW_5_4:
134                 break;
135         default:
136                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137                      max_link_bw);
138                 max_link_bw = DP_LINK_BW_1_62;
139                 break;
140         }
141         return max_link_bw;
142 }
143
144 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145 {
146         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147         struct drm_device *dev = intel_dig_port->base.base.dev;
148         u8 source_max, sink_max;
149
150         source_max = 4;
151         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153                 source_max = 2;
154
155         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157         return min(source_max, sink_max);
158 }
159
160 /*
161  * The units on the numbers in the next two are... bizarre.  Examples will
162  * make it clearer; this one parallels an example in the eDP spec.
163  *
164  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165  *
166  *     270000 * 1 * 8 / 10 == 216000
167  *
168  * The actual data capacity of that configuration is 2.16Gbit/s, so the
169  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
170  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171  * 119000.  At 18bpp that's 2142000 kilobits per second.
172  *
173  * Thus the strange-looking division by 10 in intel_dp_link_required, to
174  * get the result in decakilobits instead of kilobits.
175  */
176
/*
 * Bandwidth (in decakilobits/s) needed to carry @pixel_clock (kHz) at
 * @bpp bits per pixel; rounds up (ceiling division by 10). See the units
 * discussion in the comment above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        return (kilobits + 9) / 10;
}
182
/*
 * Payload capacity of a link: 8b/10b channel coding means 80% of the raw
 * symbol rate carries data. Result is in decakilobits/s (see comment above).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int raw = max_link_clock * max_lanes;

        return raw * 8 / 10;
}
188
189 static enum drm_mode_status
190 intel_dp_mode_valid(struct drm_connector *connector,
191                     struct drm_display_mode *mode)
192 {
193         struct intel_dp *intel_dp = intel_attached_dp(connector);
194         struct intel_connector *intel_connector = to_intel_connector(connector);
195         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
196         int target_clock = mode->clock;
197         int max_rate, mode_rate, max_lanes, max_link_clock;
198
199         if (is_edp(intel_dp) && fixed_mode) {
200                 if (mode->hdisplay > fixed_mode->hdisplay)
201                         return MODE_PANEL;
202
203                 if (mode->vdisplay > fixed_mode->vdisplay)
204                         return MODE_PANEL;
205
206                 target_clock = fixed_mode->clock;
207         }
208
209         max_link_clock = intel_dp_max_link_rate(intel_dp);
210         max_lanes = intel_dp_max_lane_count(intel_dp);
211
212         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213         mode_rate = intel_dp_link_required(target_clock, 18);
214
215         if (mode_rate > max_rate)
216                 return MODE_CLOCK_HIGH;
217
218         if (mode->clock < 10000)
219                 return MODE_CLOCK_LOW;
220
221         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222                 return MODE_H_ILLEGAL;
223
224         return MODE_OK;
225 }
226
/*
 * Pack up to four bytes from @src into one big-endian 32-bit word, as the
 * AUX channel data registers expect. Byte 0 lands in the most significant
 * position; any bytes beyond four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t packed = 0;
        int byte;

        if (src_bytes > 4)
                src_bytes = 4;

        for (byte = 0; byte < src_bytes; byte++)
                packed |= (uint32_t)src[byte] << (24 - byte * 8);

        return packed;
}
238
/*
 * Unpack a big-endian 32-bit AUX data word into up to four bytes of @dst;
 * the inverse of intel_dp_pack_aux(). At most four bytes are written.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int byte;

        if (dst_bytes > 4)
                dst_bytes = 4;

        for (byte = 0; byte < dst_bytes; byte++)
                dst[byte] = (uint8_t)(src >> (24 - byte * 8));
}
247
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        /* Decode the FSB frequency field of CLKCFG; returns MHz (FSB/4). */
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                /* Unknown encoding: fall back to the lowest common value. */
                return 133;
        }
}
281
282 static void
283 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
284                                     struct intel_dp *intel_dp);
285 static void
286 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
287                                               struct intel_dp *intel_dp);
288
/*
 * Acquire the panel power sequencer mutex. The power domain reference is
 * taken *before* pps_mutex — see vlv_power_sequencer_reset() for why the
 * get/put must happen outside the mutex. Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
306
/*
 * Release pps_mutex, then drop the power domain reference taken in
 * pps_lock() — the reverse order of acquisition.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
320
/*
 * Make the power sequencer on intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the port with minimal settings (1 lane,
 * training pattern 1). Bails out if the port is already active, since the
 * trick would disturb a live link.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled;
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        /* Route the port to the pipe whose power sequencer we're kicking. */
        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So temporarily enable it if it's not already enabled.
         */
        if (!pll_enabled)
                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled)
                vlv_force_pll_off(dev, pipe);
}
380
/*
 * Return the pipe whose panel power sequencer drives this eDP port,
 * assigning a free one (and kicking it so it locks onto the port) if none
 * is assigned yet. Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                /* Mask out pipes whose sequencer other eDP ports own. */
                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
444
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

/* True if this pipe's power sequencer reports the panel powered on. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* True if this pipe's power sequencer has VDD force enabled. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accepts any pipe; the last-resort filter. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
465
466 static enum pipe
467 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468                      enum port port,
469                      vlv_pipe_check pipe_check)
470 {
471         enum pipe pipe;
472
473         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475                         PANEL_PORT_SELECT_MASK;
476
477                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478                         continue;
479
480                 if (!pipe_check(dev_priv, pipe))
481                         continue;
482
483                 return pipe;
484         }
485
486         return INVALID_PIPE;
487 }
488
/*
 * At init, figure out which pipe's power sequencer (if any) the BIOS left
 * attached to this eDP port. Preference order: panel on > VDD on > merely
 * port-selected. Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
525
/*
 * Forget all eDP port -> power sequencer assignments (VLV/CHV only), e.g.
 * after a power well cycle invalidates the hardware state. Deliberately
 * runs without pps_mutex — see the comment below for the lock ordering.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
554
555 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556 {
557         struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559         if (HAS_PCH_SPLIT(dev))
560                 return PCH_PP_CONTROL;
561         else
562                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563 }
564
565 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566 {
567         struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569         if (HAS_PCH_SPLIT(dev))
570                 return PCH_PP_STATUS;
571         else
572                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573 }
574
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only act for eDP, and only on an actual restart. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Block the reboot until the panel's power-cycle delay elapses. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
610
611 static bool edp_have_panel_power(struct intel_dp *intel_dp)
612 {
613         struct drm_device *dev = intel_dp_to_dev(intel_dp);
614         struct drm_i915_private *dev_priv = dev->dev_private;
615
616         lockdep_assert_held(&dev_priv->pps_mutex);
617
618         if (IS_VALLEYVIEW(dev) &&
619             intel_dp->pps_pipe == INVALID_PIPE)
620                 return false;
621
622         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
623 }
624
625 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
626 {
627         struct drm_device *dev = intel_dp_to_dev(intel_dp);
628         struct drm_i915_private *dev_priv = dev->dev_private;
629
630         lockdep_assert_held(&dev_priv->pps_mutex);
631
632         if (IS_VALLEYVIEW(dev) &&
633             intel_dp->pps_pipe == INVALID_PIPE)
634                 return false;
635
636         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
637 }
638
639 static void
640 intel_dp_check_edp(struct intel_dp *intel_dp)
641 {
642         struct drm_device *dev = intel_dp_to_dev(intel_dp);
643         struct drm_i915_private *dev_priv = dev->dev_private;
644
645         if (!is_edp(intel_dp))
646                 return;
647
648         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
649                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
651                               I915_READ(_pp_stat_reg(intel_dp)),
652                               I915_READ(_pp_ctrl_reg(intel_dp)));
653         }
654 }
655
/*
 * Wait (up to 10ms) for the pending AUX transfer to complete, either via
 * the AUX-done interrupt or by atomic polling, and return the final
 * channel control register value for the caller to inspect.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* C re-reads ch_ctl (side effect: updates status) and tests SEND_BUSY. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
679
680 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
681 {
682         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683         struct drm_device *dev = intel_dig_port->base.base.dev;
684
685         /*
686          * The clock divider is based off the hrawclk, and would like to run at
687          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
688          */
689         return index ? 0 : intel_hrawclk(dev) / 2;
690 }
691
692 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693 {
694         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695         struct drm_device *dev = intel_dig_port->base.base.dev;
696
697         if (index)
698                 return 0;
699
700         if (intel_dig_port->port == PORT_A) {
701                 if (IS_GEN6(dev) || IS_GEN7(dev))
702                         return 200; /* SNB & IVB eDP input clock at 400Mhz */
703                 else
704                         return 225; /* eDP input clock at 450Mhz */
705         } else {
706                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707         }
708 }
709
710 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711 {
712         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713         struct drm_device *dev = intel_dig_port->base.base.dev;
714         struct drm_i915_private *dev_priv = dev->dev_private;
715
716         if (intel_dig_port->port == PORT_A) {
717                 if (index)
718                         return 0;
719                 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
720         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721                 /* Workaround for non-ULT HSW */
722                 switch (index) {
723                 case 0: return 63;
724                 case 1: return 72;
725                 default: return 0;
726                 }
727         } else  {
728                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
729         }
730 }
731
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 100;
}
736
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug-in into the existing code.
         */
        if (index)
                return 0;

        return 1;
}
746
747 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
748                                       bool has_aux_irq,
749                                       int send_bytes,
750                                       uint32_t aux_clock_divider)
751 {
752         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
753         struct drm_device *dev = intel_dig_port->base.base.dev;
754         uint32_t precharge, timeout;
755
756         if (IS_GEN6(dev))
757                 precharge = 3;
758         else
759                 precharge = 5;
760
761         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
762                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
763         else
764                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
765
766         return DP_AUX_CH_CTL_SEND_BUSY |
767                DP_AUX_CH_CTL_DONE |
768                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
769                DP_AUX_CH_CTL_TIME_OUT_ERROR |
770                timeout |
771                DP_AUX_CH_CTL_RECEIVE_ERROR |
772                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
774                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
775 }
776
777 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778                                       bool has_aux_irq,
779                                       int send_bytes,
780                                       uint32_t unused)
781 {
782         return DP_AUX_CH_CTL_SEND_BUSY |
783                DP_AUX_CH_CTL_DONE |
784                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785                DP_AUX_CH_CTL_TIME_OUT_ERROR |
786                DP_AUX_CH_CTL_TIME_OUT_1600us |
787                DP_AUX_CH_CTL_RECEIVE_ERROR |
788                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790 }
791
/*
 * Perform one raw AUX channel transaction: send @send_bytes bytes from
 * @send and read up to @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY (channel stuck busy / never completed), -E2BIG (transfer larger
 * than the 20-byte hardware FIFO), -EIO (receive error) or -ETIMEDOUT
 * (sink did not reply, e.g. nothing connected).
 *
 * The whole transaction runs under pps_lock() so it is serialized against
 * panel power sequencer state changes.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers directly follow AUX_CTL */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Channel still busy after ~3ms - give up rather than clobber it. */
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/*
	 * Retry the transfer at each clock divider the platform hook offers;
	 * the hook returns 0 when the divider list is exhausted.
	 */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	/* Clamp to the caller's buffer; excess reply bytes are dropped. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo the latency request, runtime ref, VDD and lock in reverse order. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
931
932 #define BARE_ADDRESS_SIZE       3
933 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer hook: pack a drm_dp_aux_msg into the raw AUX wire
 * format (header + optional payload) and run it through intel_dp_aux_ch().
 *
 * Returns the payload size on success, or a negative errno on failure.
 * On success msg->reply is filled from the sink's reply byte.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];	/* 20 = hardware FIFO limit (5 x 4-byte regs) */
	size_t txsize, rxsize;
	int ret;

	/* Build the AUX header: request, 16 address bits, then length - 1. */
	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-size write is an address-only transaction (3-byte header). */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 1;	/* writes only get the reply byte back */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			/* Return payload size. */
			ret = msg->size;
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* reply byte + requested payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
996
/*
 * Set up the AUX channel for this DP port: pick the AUX_CTL register,
 * register the drm_dp_aux helper and create a sysfs link from the
 * connector to the underlying i2c/ddc device.
 *
 * Failures are logged but not propagated (the function returns void).
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* Per-port PCH AUX_CTL register and bus name; port A is on the CPU. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();	/* unknown port - driver bug */
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Removed again in intel_dp_connector_unregister(). */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1061
/*
 * Connector unregister hook: remove the sysfs link created by
 * intel_dp_aux_init(), then do the common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	/* MST connectors never got a sysfs link of their own. */
	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1072
/*
 * Fill in the SKL eDP PLL state: eDP always runs off DPLL0, with the
 * DPLL_CTRL1 link-rate field selected from the port clock (@link_clock,
 * in kHz).  The hardware encodes the rate as half the link clock, hence
 * the switch on link_clock / 2 (e.g. 162000 kHz -> "810").
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	/* cfgcr1/2 are unused for DPLL0; clear them explicitly. */
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/*
	 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	 * results in CDCLK change. Need to handle the change of CDCLK by
	 * disabling pipes and re-enabling them.
	 */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	/* NOTE(review): no default case - an unlisted rate leaves only the
	 * OVERRIDE bit set; presumably callers only pass the rates above. */
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1115
1116 static void
1117 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1118 {
1119         switch (link_bw) {
1120         case DP_LINK_BW_1_62:
1121                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122                 break;
1123         case DP_LINK_BW_2_7:
1124                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125                 break;
1126         case DP_LINK_BW_5_4:
1127                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128                 break;
1129         }
1130 }
1131
1132 static int
1133 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1134 {
1135         if (intel_dp->num_supported_rates) {
1136                 *sink_rates = intel_dp->supported_rates;
1137                 return intel_dp->num_supported_rates;
1138         }
1139
1140         *sink_rates = default_rates;
1141
1142         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1143 }
1144
1145 static int
1146 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1147 {
1148         if (INTEL_INFO(dev)->gen >= 9) {
1149                 *source_rates = gen9_rates;
1150                 return ARRAY_SIZE(gen9_rates);
1151         }
1152
1153         *source_rates = default_rates;
1154
1155         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156                 /* WaDisableHBR2:skl */
1157                 return (DP_LINK_BW_2_7 >> 3) + 1;
1158         else if (INTEL_INFO(dev)->gen >= 8 ||
1159             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160                 return (DP_LINK_BW_5_4 >> 3) + 1;
1161         else
1162                 return (DP_LINK_BW_2_7 >> 3) + 1;
1163 }
1164
1165 static void
1166 intel_dp_set_clock(struct intel_encoder *encoder,
1167                    struct intel_crtc_state *pipe_config, int link_bw)
1168 {
1169         struct drm_device *dev = encoder->base.dev;
1170         const struct dp_link_dpll *divisor = NULL;
1171         int i, count = 0;
1172
1173         if (IS_G4X(dev)) {
1174                 divisor = gen4_dpll;
1175                 count = ARRAY_SIZE(gen4_dpll);
1176         } else if (HAS_PCH_SPLIT(dev)) {
1177                 divisor = pch_dpll;
1178                 count = ARRAY_SIZE(pch_dpll);
1179         } else if (IS_CHERRYVIEW(dev)) {
1180                 divisor = chv_dpll;
1181                 count = ARRAY_SIZE(chv_dpll);
1182         } else if (IS_VALLEYVIEW(dev)) {
1183                 divisor = vlv_dpll;
1184                 count = ARRAY_SIZE(vlv_dpll);
1185         }
1186
1187         if (divisor && count) {
1188                 for (i = 0; i < count; i++) {
1189                         if (link_bw == divisor[i].link_bw) {
1190                                 pipe_config->dpll = divisor[i].dpll;
1191                                 pipe_config->clock_set = true;
1192                                 break;
1193                         }
1194                 }
1195         }
1196 }
1197
/*
 * Linear merge-intersection of two ascending-sorted rate arrays.
 * Common rates are written to @supported_rates in ascending order;
 * the number of common rates is returned.
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *supported_rates)
{
	int si = 0, ki = 0, out = 0;

	while (si < source_len && ki < sink_len) {
		if (source_rates[si] < sink_rates[ki]) {
			si++;
		} else if (source_rates[si] > sink_rates[ki]) {
			ki++;
		} else {
			/* Rate supported by both ends - keep it. */
			supported_rates[out++] = source_rates[si];
			si++;
			ki++;
		}
	}

	return out;
}
1218
/*
 * Fill @supported_rates with the link rates common to this source and
 * the attached sink (ascending order) and return the count.
 */
static int intel_supported_rates(struct intel_dp *intel_dp,
				 int *supported_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       supported_rates);
}
1233
1234 static int rate_to_index(int find, const int *rates)
1235 {
1236         int i = 0;
1237
1238         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1239                 if (find == rates[i])
1240                         break;
1241
1242         return i;
1243 }
1244
/*
 * Return the highest link rate (in kHz) supported by both source and
 * sink, falling back to RBR (162000 kHz) if there is no common rate.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_supported_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/*
	 * rates[] is zero-initialized and filled in ascending order, so
	 * rate_to_index(0, rates) finds the first unused slot, i.e. one
	 * past the largest common rate.
	 */
	return rates[rate_to_index(0, rates) - 1];
}
1257
/*
 * Compute the DP link configuration for a mode set: search over bpp,
 * link clock and lane count for the cheapest combination that carries
 * the mode's data rate, then fill in pipe_config (and intel_dp's
 * link_bw/rate_select/lane_count) accordingly.
 *
 * Returns true on success, false if no working configuration exists.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
	int supported_len;

	supported_len = intel_supported_rates(intel_dp, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	/* eDP panels have a fixed native mode; apply it and the panel fitter. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are not supported on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/*
	 * Search order: highest bpp first, then slowest clock, then fewest
	 * lanes - the first fit wins.
	 */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	/*
	 * Sinks with a DPCD rate table use DP_LINK_RATE_SET (rate_select);
	 * others use the classic DP_LINK_BW_SET bw code.  Exactly one of
	 * link_bw / rate_select is meaningful at a time.
	 */
	if (intel_dp->num_supported_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock],
				      intel_dp->supported_rates);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(supported_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m/n values for the DRRS downclocked mode, if any. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1417
/*
 * Program the DP_A PLL frequency select bits for CPU eDP based on the
 * crtc's computed port clock, mirroring the choice into intel_dp->DP.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);	/* let the PLL settle before proceeding */
}
1448
/*
 * Compute the DP port register value (intel_dp->DP) for the upcoming
 * enable, handling the bit-layout differences between the IBX/CPU, SNB,
 * IVB and CPT register variants.  Nothing is written to the port
 * register here except what's cached in intel_dp->DP.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU port A: CPT-style training bits, pipe select in bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX/CPU-style register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			/* CHV has a third pipe, needing a wider select field. */
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		/* CPT: most settings live in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}
1523
1524 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1525 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1526
1527 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1528 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1529
1530 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1531 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1532
/*
 * Poll the panel power status register until (status & @mask) == @value,
 * giving up (with an error log) after 5 seconds.  Caller must hold
 * pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000ms timeout, polling every 10ms */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1559
/* Wait until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1565
/* Wait until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1571
/*
 * Wait out the panel's mandatory power-cycle delay (since the last power
 * off) and then for the sequencer to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1583
/* Honor the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1589
/* Honor the panel's backlight-off -> power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1595
/* Read the current pp_control value, returning it with the panel
 * register-unlock key bits set, so a subsequent write of the value
 * is not rejected by locked hardware
 */
1599
1600 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1601 {
1602         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1603         struct drm_i915_private *dev_priv = dev->dev_private;
1604         u32 control;
1605
1606         lockdep_assert_held(&dev_priv->pps_mutex);
1607
1608         control = I915_READ(_pp_ctrl_reg(intel_dp));
1609         control &= ~PANEL_UNLOCK_MASK;
1610         control |= PANEL_UNLOCK_REGS;
1611         return control;
1612 }
1613
/*
 * Force the eDP panel VDD rail on so that AUX transactions work without
 * full panel power.  Returns true when this call newly requested VDD
 * (i.e. the caller owes a matching edp_panel_vdd_off()).
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return false;

        /* Don't let a pending deferred VDD-off race with this request. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* VDD already forced on: nothing more to program. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        /* Reference is dropped again in edp_panel_vdd_off_sync(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
1671
/*
 * Public wrapper around edp_panel_vdd_on() that takes pps_mutex itself.
 *
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        /* A nested request would leave an unbalanced off later; warn. */
        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->port));
}
1693
/*
 * Actually drop the VDD force bit and release the power domain reference
 * taken in edp_panel_vdd_on().  Caller holds pps_mutex and must have
 * cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if the hardware isn't forcing VDD right now. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* Dropping VDD with the panel off starts a power cycle. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        /* Drop the reference taken in edp_panel_vdd_on(). */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1734
/*
 * Deferred-work handler that turns VDD off, unless somebody requested
 * VDD again after the work was scheduled.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
1745
1746 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1747 {
1748         unsigned long delay;
1749
1750         /*
1751          * Queue the timer to fire a long time from now (relative to the power
1752          * down delay) to keep the panel power up across a sequence of
1753          * operations.
1754          */
1755         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1756         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1757 }
1758
/*
 * Release a VDD request.  @sync selects immediate turn-off versus the
 * deferred edp_panel_vdd_work() path.
 *
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv =
                intel_dp_to_dev(intel_dp)->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
1784
/*
 * Power the eDP panel on through the power sequencer and wait until it
 * reports the panel as on.  Caller holds pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp used by wait_backlight_on() for the backlight delay. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1832
/* Public wrapper around edp_panel_on() that takes pps_mutex itself. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
1842
1843
/*
 * Power the eDP panel off through the power sequencer and wait for it
 * to complete.  Requires VDD to be forced on beforehand, and drops the
 * power domain reference taken when VDD was enabled.  Caller holds
 * pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Start of the off -> on minimum delay window. */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1885
/* Public wrapper around edp_panel_off() that takes pps_mutex itself. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
1895
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        /* Set the backlight-enable bit in PP_CONTROL. */
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
1925
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the panel power control enable bit. */
        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
1937
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        /* Clear the backlight-enable bit in PP_CONTROL. */
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Honour the backlight-off -> power-off delay from here on. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
1964
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* Panel power control disable first, then the PWM. */
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
1976
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
                                      bool enable)
{
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
        bool is_enabled;

        /* Read current state from the hardware, not cached software state. */
        pps_lock(intel_dp);
        is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
        pps_unlock(intel_dp);

        /* No-op if the requested state is already in effect. */
        if (is_enabled == enable)
                return;

        DRM_DEBUG_KMS("panel power control backlight %s\n",
                      enable ? "enable" : "disable");

        if (enable)
                _intel_edp_backlight_on(intel_dp);
        else
                _intel_edp_backlight_off(intel_dp);
}
2002
/*
 * Enable the CPU eDP PLL (port A).  The pipe must be disabled and both
 * the PLL and the port must currently be off.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Give the PLL time to settle before the port is enabled. */
        udelay(200);
}
2028
/*
 * Disable the CPU eDP PLL (port A).  The pipe must be disabled, the PLL
 * must currently be on, and the port already off.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Let the PLL spin down before anything else touches the port. */
        udelay(200);
}
2053
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                /* Any non-on mode maps to sink power state D3 (off). */
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        /* ret == 1 means one byte was successfully written over AUX. */
        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2084
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it (written to *pipe).  Returns false when the
 * power domain or the port itself is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 tmp;

        power_domain = intel_display_port_power_domain(encoder);
        if (!intel_display_power_is_enabled(dev_priv, power_domain))
                return false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        /* The pipe-select encoding in the port register is platform specific. */
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (IS_CHERRYVIEW(dev)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                /*
                 * On CPT the routing lives in the transcoder registers:
                 * scan each pipe's TRANS_DP_CTL for our port selector.
                 */
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        return true;
                }

                for_each_pipe(dev_priv, i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}
2143
/*
 * Read back the current hardware state of this DP encoder into
 * @pipe_config: sync flags, audio, color range, m/n values, port clock
 * and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);
        if (tmp & DP_AUDIO_OUTPUT_ENABLE)
                pipe_config->has_audio = true;

        /* Sync polarity lives in the port register, except on CPT where
         * it is in the transcoder register. */
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A's link clock comes from the CPU eDP PLL frequency select. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2227
/*
 * Encoder disable hook: tear down audio/PSR, shut the eDP panel down in
 * the required order (backlight -> sink DPMS off -> panel off, all with
 * VDD held), and on pre-gen5 also take the link down before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2251
2252 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2253 {
2254         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2255         enum port port = dp_to_dig_port(intel_dp)->port;
2256
2257         intel_dp_link_down(intel_dp);
2258         if (port == PORT_A)
2259                 ironlake_edp_pll_off(intel_dp);
2260 }
2261
2262 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2263 {
2264         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2265
2266         intel_dp_link_down(intel_dp);
2267 }
2268
/*
 * CHV post-disable: take the link down, then reset the PHY data lanes
 * for this channel through the DPIO sideband.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_link_down(intel_dp);

        mutex_lock(&dev_priv->dpio_lock);

        /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        /* Assert lane reset on both PCS groups (lanes held in reset). */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        mutex_unlock(&dev_priv->dpio_lock);
}
2304
/*
 * Translate a DPCD training pattern (@dp_train_pat) into the platform's
 * register encoding.  On DDI platforms this writes DP_TP_CTL directly;
 * otherwise it only updates *DP and leaves the port register write to
 * the caller.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                /* DDI: training pattern lives in DP_TP_CTL. */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
                /* CPT encoding of the training pattern in the port register. */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* CPT has no pattern 3; fall back to pattern 2. */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* Legacy / VLV / CHV encoding in the port register. */
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                /* Only CHV supports pattern 3 here. */
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2387
/*
 * Program the port register with training pattern 1 and then enable the
 * port, using the two-write sequence VLV/CHV require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2411
/*
 * Common DP enable path: attach a power sequencer (VLV/CHV eDP), turn
 * the port on, run the eDP panel power-on sequence, train the link,
 * and finally enable audio if the mode carries it. The ordering here
 * is strict; see intel_dp_enable_port() for the double-write dance.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be disabled when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* Bind a power sequencer to this port (bails for non-eDP). */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Panel power on; vdd is only held across the panel-on step. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));

	/* Wake the sink, then run the full link training sequence. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2450
/*
 * g4x/ilk enable hook: bring the port/link up, then turn on the
 * backlight (relevant for eDP panels).
 */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
2458
/*
 * VLV/CHV enable hook. The port itself is already enabled and trained
 * from the pre_enable hook (see vlv_pre_enable_dp()/chv_pre_enable_dp(),
 * which call intel_enable_dp()); only backlight and PSR remain.
 */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
2466
/*
 * g4x/ilk pre_enable hook: prepare the port register state and, for
 * the CPU eDP port (port A on ilk+), select and turn on its PLL
 * before the port is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2480
/*
 * Logically disconnect the power sequencer currently bound to this
 * port: sync off any pending vdd, clear the port select in the PPS
 * on-delay register, and mark the port as having no sequencer.
 * Caller is expected to hold the pps mutex (not asserted here).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2506
/*
 * Detach the power sequencer of @pipe from whichever eDP port
 * currently owns it, so it can be reassigned to another port.
 * Must be called with the pps mutex held; only pipes A and B have
 * power sequencers on VLV/CHV.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP ports ever own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an active port indicates a driver bug. */
		WARN(encoder->connectors_active,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2543
/*
 * Bind the power sequencer of this port's current pipe to the port
 * (eDP only). Detaches any sequencer previously used by this port,
 * steals the target pipe's sequencer from any other port, then
 * initializes the sequencer state and registers for this pipe/port.
 * Must be called with the pps mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Nothing to do if the right sequencer is already attached. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2584
2585 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2586 {
2587         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2588         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2589         struct drm_device *dev = encoder->base.dev;
2590         struct drm_i915_private *dev_priv = dev->dev_private;
2591         struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2592         enum dpio_channel port = vlv_dport_to_channel(dport);
2593         int pipe = intel_crtc->pipe;
2594         u32 val;
2595
2596         mutex_lock(&dev_priv->dpio_lock);
2597
2598         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2599         val = 0;
2600         if (pipe)
2601                 val |= (1<<21);
2602         else
2603                 val &= ~(1<<21);
2604         val |= 0x001000c4;
2605         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2606         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2607         vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2608
2609         mutex_unlock(&dev_priv->dpio_lock);
2610
2611         intel_enable_dp(encoder);
2612 }
2613
/*
 * VLV pre_pll_enable hook: prepare the port register state, then put
 * the PHY Tx lanes into their default reset configuration and apply
 * the inter-pair skew workaround, all under the dpio sideband lock.
 * The DPIO constants are opaque values from hardware programming notes.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->dpio_lock);
}
2643
/*
 * CHV pre_enable hook: configure the PHY for this channel (TX FIFO
 * reset source, deassert lane soft resets, per-lane latency/upar
 * settings), then enable the port and train the link via
 * intel_enable_dp(). All PHY accesses are under the dpio lock.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
	u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the latency optimal bit */
		data = (i == 1) ? 0x0 : 0x6;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
				data << DPIO_FRC_LATENCY_SHFIT);

		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* FIXME: Fix up value only after power analysis */

	mutex_unlock(&dev_priv->dpio_lock);

	intel_enable_dp(encoder);
}
2705
/*
 * CHV pre_pll_enable hook: prepare the port register state, then
 * program the PHY common-lane clock distribution (left/right buffer
 * enables depend on pipe and channel) and the clock channel usage for
 * both PCS groups and the common lane, all under the dpio lock.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}
2771
2772 /*
2773  * Native read with retry for link status and receiver capability reads for
2774  * cases where the sink may still be asleep.
2775  *
2776  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2777  * supposed to retry 3 times per the spec.
2778  */
2779 static ssize_t
2780 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2781                         void *buffer, size_t size)
2782 {
2783         ssize_t ret;
2784         int i;
2785
2786         /*
2787          * Sometime we just get the same incorrect byte repeated
2788          * over the entire buffer. Doing just one throw away read
2789          * initially seems to "solve" it.
2790          */
2791         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2792
2793         for (i = 0; i < 3; i++) {
2794                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2795                 if (ret == size)
2796                         return ret;
2797                 msleep(1);
2798         }
2799
2800         return ret;
2801 }
2802
2803 /*
2804  * Fetch AUX CH registers 0x202 - 0x207 which contain
2805  * link status information
2806  */
2807 static bool
2808 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2809 {
2810         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2811                                        DP_LANE0_1_STATUS,
2812                                        link_status,
2813                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2814 }
2815
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level (DP_TRAIN_VOLTAGE_SWING_*)
 * this source can drive, which varies per platform and, on some
 * platforms, per port (port A being the CPU eDP port).
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9 allows level 3 only for low-vswing eDP on port A */
		if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
2837
/*
 * Return the maximum pre-emphasis level (DP_TRAIN_PRE_EMPH_*) the
 * source supports for a given voltage swing; higher swings allow less
 * pre-emphasis. The mapping is platform-specific (and port-specific
 * for gen7 port A).
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2905
/*
 * Translate the negotiated train_set (voltage swing + pre-emphasis)
 * into VLV PHY register values and program them via DPIO. The
 * demph/preemph/uniqtranscale constants are opaque values from the
 * hardware programming tables. Returns 0 in all cases, including
 * unsupported swing/pre-emphasis combinations (which program nothing).
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* TX_DW5 is toggled off/on around the update sequence. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
3005
3006 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3007 {
3008         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3009         struct drm_i915_private *dev_priv = dev->dev_private;
3010         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3011         struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3012         u32 deemph_reg_value, margin_reg_value, val;
3013         uint8_t train_set = intel_dp->train_set[0];
3014         enum dpio_channel ch = vlv_dport_to_channel(dport);
3015         enum pipe pipe = intel_crtc->pipe;
3016         int i;
3017
3018         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3019         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3020                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3021                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3022                         deemph_reg_value = 128;
3023                         margin_reg_value = 52;
3024                         break;
3025                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3026                         deemph_reg_value = 128;
3027                         margin_reg_value = 77;
3028                         break;
3029                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3030                         deemph_reg_value = 128;
3031                         margin_reg_value = 102;
3032                         break;
3033                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3034                         deemph_reg_value = 128;
3035                         margin_reg_value = 154;
3036                         /* FIXME extra to set for 1200 */
3037                         break;
3038                 default:
3039                         return 0;
3040                 }
3041                 break;
3042         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3043                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3044                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3045                         deemph_reg_value = 85;
3046                         margin_reg_value = 78;
3047                         break;
3048                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3049                         deemph_reg_value = 85;
3050                         margin_reg_value = 116;
3051                         break;
3052                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3053                         deemph_reg_value = 85;
3054                         margin_reg_value = 154;
3055                         break;
3056                 default:
3057                         return 0;
3058                 }
3059                 break;
3060         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3061                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3062                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3063                         deemph_reg_value = 64;
3064                         margin_reg_value = 104;
3065                         break;
3066                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3067                         deemph_reg_value = 64;
3068                         margin_reg_value = 154;
3069                         break;
3070                 default:
3071                         return 0;
3072                 }
3073                 break;
3074         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3075                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3076                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3077                         deemph_reg_value = 43;
3078                         margin_reg_value = 154;
3079                         break;
3080                 default:
3081                         return 0;
3082                 }
3083                 break;
3084         default:
3085                 return 0;
3086         }
3087
3088         mutex_lock(&dev_priv->dpio_lock);
3089
3090         /* Clear calc init */
3091         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3092         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3093         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3094         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3095         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3096
3097         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3098         val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3099         val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3100         val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3101         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3102
3103         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3104         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3105         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3106         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3107
3108         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3109         val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3110         val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3111         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3112
3113         /* Program swing deemph */
3114         for (i = 0; i < 4; i++) {
3115                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3116                 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3117                 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3118                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3119         }
3120
3121         /* Program swing margin */
3122         for (i = 0; i < 4; i++) {
3123                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3124                 val &= ~DPIO_SWING_MARGIN000_MASK;
3125                 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3126                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3127         }
3128
3129         /* Disable unique transition scale */
3130         for (i = 0; i < 4; i++) {
3131                 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3132                 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3133                 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3134         }
3135
3136         if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3137                         == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3138                 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3139                         == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3140
3141                 /*
3142                  * The document said it needs to set bit 27 for ch0 and bit 26
3143                  * for ch1. Might be a typo in the doc.
3144                  * For now, for this unique transition scale selection, set bit
3145                  * 27 for ch0 and ch1.
3146                  */
3147                 for (i = 0; i < 4; i++) {
3148                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3149                         val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3150                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3151                 }
3152
3153                 for (i = 0; i < 4; i++) {
3154                         val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3155                         val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3156                         val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3157                         vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3158                 }
3159         }
3160
3161         /* Start swing calculation */
3162         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3163         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3164         vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3165
3166         val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3167         val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3168         vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3169
3170         /* LRC Bypass */
3171         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3172         val |= DPIO_LRC_BYPASS;
3173         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3174
3175         mutex_unlock(&dev_priv->dpio_lock);
3176
3177         return 0;
3178 }
3179
3180 static void
3181 intel_get_adjust_train(struct intel_dp *intel_dp,
3182                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3183 {
3184         uint8_t v = 0;
3185         uint8_t p = 0;
3186         int lane;
3187         uint8_t voltage_max;
3188         uint8_t preemph_max;
3189
3190         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3191                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3192                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3193
3194                 if (this_v > v)
3195                         v = this_v;
3196                 if (this_p > p)
3197                         p = this_p;
3198         }
3199
3200         voltage_max = intel_dp_voltage_max(intel_dp);
3201         if (v >= voltage_max)
3202                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3203
3204         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3205         if (p >= preemph_max)
3206                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3207
3208         for (lane = 0; lane < 4; lane++)
3209                 intel_dp->train_set[lane] = v | p;
3210 }
3211
3212 static uint32_t
3213 intel_gen4_signal_levels(uint8_t train_set)
3214 {
3215         uint32_t        signal_levels = 0;
3216
3217         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3218         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3219         default:
3220                 signal_levels |= DP_VOLTAGE_0_4;
3221                 break;
3222         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3223                 signal_levels |= DP_VOLTAGE_0_6;
3224                 break;
3225         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3226                 signal_levels |= DP_VOLTAGE_0_8;
3227                 break;
3228         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3229                 signal_levels |= DP_VOLTAGE_1_2;
3230                 break;
3231         }
3232         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3233         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3234         default:
3235                 signal_levels |= DP_PRE_EMPHASIS_0;
3236                 break;
3237         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3238                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3239                 break;
3240         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3241                 signal_levels |= DP_PRE_EMPHASIS_6;
3242                 break;
3243         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3244                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3245                 break;
3246         }
3247         return signal_levels;
3248 }
3249
3250 /* Gen6's DP voltage swing and pre-emphasis control */
3251 static uint32_t
3252 intel_gen6_edp_signal_levels(uint8_t train_set)
3253 {
3254         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3255                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3256         switch (signal_levels) {
3257         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3258         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3259                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3260         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3261                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3262         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3263         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3264                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3265         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3266         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3267                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3268         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3269         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3270                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3271         default:
3272                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3273                               "0x%x\n", signal_levels);
3274                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3275         }
3276 }
3277
3278 /* Gen7's DP voltage swing and pre-emphasis control */
3279 static uint32_t
3280 intel_gen7_edp_signal_levels(uint8_t train_set)
3281 {
3282         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3283                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3284         switch (signal_levels) {
3285         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3286                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3287         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3288                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3289         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3290                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3291
3292         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3293                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3294         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3295                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3296
3297         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3298                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3299         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3300                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3301
3302         default:
3303                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3304                               "0x%x\n", signal_levels);
3305                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3306         }
3307 }
3308
3309 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3310 static uint32_t
3311 intel_hsw_signal_levels(uint8_t train_set)
3312 {
3313         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3314                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3315         switch (signal_levels) {
3316         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3317                 return DDI_BUF_TRANS_SELECT(0);
3318         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319                 return DDI_BUF_TRANS_SELECT(1);
3320         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321                 return DDI_BUF_TRANS_SELECT(2);
3322         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3323                 return DDI_BUF_TRANS_SELECT(3);
3324
3325         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3326                 return DDI_BUF_TRANS_SELECT(4);
3327         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3328                 return DDI_BUF_TRANS_SELECT(5);
3329         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3330                 return DDI_BUF_TRANS_SELECT(6);
3331
3332         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3333                 return DDI_BUF_TRANS_SELECT(7);
3334         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3335                 return DDI_BUF_TRANS_SELECT(8);
3336
3337         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3338                 return DDI_BUF_TRANS_SELECT(9);
3339         default:
3340                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3341                               "0x%x\n", signal_levels);
3342                 return DDI_BUF_TRANS_SELECT(0);
3343         }
3344 }
3345
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* All lanes carry identical settings, so lane 0 is representative. */
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/*
		 * NOTE(review): the CHV check must stay ahead of the VLV one —
		 * presumably IS_VALLEYVIEW() is also true on CHV; confirm
		 * before reordering these branches.
		 */
		signal_levels = intel_chv_signal_levels(intel_dp);
		mask = 0; /* levels are programmed via DPIO, not this register */
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0; /* levels are programmed via DPIO, not this register */
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* Gen7 port A is eDP with its own level encoding. */
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	/* Replace only the level bits; the rest of *DP is preserved. */
	*DP = (*DP & ~mask) | signal_levels;
}
3380
/*
 * Program a training pattern on both ends of the link: the source's port
 * register first, then the sink's DPCD TRAINING_PATTERN_SET (and, unless
 * disabling, the per-lane TRAINING_LANEx_SET bytes in the same transfer).
 * Returns true if the full DPCD write went through.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pattern byte plus one TRAINING_LANEx_SET byte per possible lane. */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	/* Program the source side first ... */
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	/* ... then tell the sink which pattern to expect. */
	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	/* Success only if the whole buffer was transferred. */
	return ret == len;
}
3413
/*
 * Restart training from scratch: drop all accumulated voltage-swing and
 * pre-emphasis adjustments, reprogram the signal levels, then set the
 * requested training pattern on both ends of the link.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3422
/*
 * Apply the sink's latest adjust requests: recompute train_set from
 * link_status, program the new levels on the source, then mirror the
 * per-lane values to the sink's DPCD. Returns true if all lane bytes
 * were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Fold the sink's adjust requests into train_set ... */
	intel_get_adjust_train(intel_dp, link_status);
	/* ... apply them on the source ... */
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	/* ... and mirror them to the sink's per-lane DPCD registers. */
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3443
/*
 * Switch a DDI port to transmitting the idle pattern after training
 * completes, and wait for the hardware to confirm. No-op on non-DDI
 * platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3474
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/*
	 * Sinks advertising an explicit link-rate table (eDP 1.4) also need
	 * the index into that table written to DP_LINK_RATE_SET.
	 */
	if (intel_dp->num_supported_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff; /* sentinel: never matches a real swing level */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zero, up to 5 times. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Remember the final port register value for later stages. */
	intel_dp->DP = DP;
}
3568
/*
 * Second phase of link training: channel equalization. Assumes clock
 * recovery already succeeded; falls back to a full retrain if it is lost.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training pattern 3 for HBR2 or DP 1.2 devices that support it. */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after too many clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			/* Clock recovery was lost: redo it, then resume EQ. */
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
3648
/* Disable the training pattern and return the link to normal operation. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3654
/*
 * Tear down an enabled non-DDI DP port: idle the link, apply the IBX
 * transcoder-select workaround, then disable audio and the port itself.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms tear the link down through their own path. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port was never enabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the port into the idle training pattern before disabling it. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	/* Give the panel time to power down before it may be re-enabled. */
	msleep(intel_dp->panel_power_down_delay);
}
3704
/*
 * Read and cache the sink's DPCD receiver-capability area, then probe the
 * optional PSR, TPS3 and eDP 1.4 link-rate-table capabilities and any
 * downstream-port info. Returns false if the sink does not respond or
 * reports no DPCD.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP 1.4 or higher */
		__le16 supported_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				supported_rates,
				sizeof(supported_rates));

		/* The table is zero-terminated when shorter than the max. */
		for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
			int val = le16_to_cpu(supported_rates[i]);

			if (val == 0)
				break;

			/* DPCD stores rates in 200 kHz units; cache them in kHz. */
			intel_dp->supported_rates[i] = val * 200;
		}
		intel_dp->num_supported_rates = i;
	}
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3780
/* Log the sink and branch device OUIs, if the sink supports reporting them. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	/* Only log when the full 3-byte OUI was read. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}
3797
3798 static bool
3799 intel_dp_probe_mst(struct intel_dp *intel_dp)
3800 {
3801         u8 buf[1];
3802
3803         if (!intel_dp->can_mst)
3804                 return false;
3805
3806         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3807                 return false;
3808
3809         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3810                 if (buf[0] & DP_MST_CAP) {
3811                         DRM_DEBUG_KMS("Sink is MST capable\n");
3812                         intel_dp->is_mst = true;
3813                 } else {
3814                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3815                         intel_dp->is_mst = false;
3816                 }
3817         }
3818
3819         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3820         return intel_dp->is_mst;
3821 }
3822
/*
 * Retrieve a 6-byte frame CRC computed by the sink's test facility.
 * Returns 0 and fills @crc on success, or a negative errno:
 * -EIO on AUX failure, -ENOTTY if the sink cannot compute CRCs,
 * -ETIMEDOUT if the sink's CRC count never advances.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* Start CRC calculation in the sink, preserving other TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0)
		return -EIO;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Wait (up to 6 vblanks) for the sink's CRC count to advance. */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0)
			return -EIO;
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		return -ETIMEDOUT;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EIO;

	/* Stop CRC calculation, restoring the previous TEST_SINK contents. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0)
		return -EIO;

	return 0;
}
3873
3874 static bool
3875 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3876 {
3877         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3878                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3879                                        sink_irq_vector, 1) == 1;
3880 }
3881
3882 static bool
3883 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3884 {
3885         int ret;
3886
3887         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3888                                              DP_SINK_COUNT_ESI,
3889                                              sink_irq_vector, 14);
3890         if (ret != 14)
3891                 return false;
3892
3893         return true;
3894 }
3895
/*
 * Respond to an automated test request (DP_AUTOMATED_TEST_REQUEST in the
 * device service IRQ vector).  No test types are implemented here, so every
 * request is refused with a NAK.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
3902
/*
 * Service a short HPD pulse on an MST-capable link: read the sink's ESI
 * bytes, retrain if channel EQ was lost, hand topology events to the MST
 * manager, and ack serviced events back to the sink.  Loops via "go_again"
 * while new events keep arriving.
 *
 * Returns the drm_dp_mst_hpd_irq() result (or 0) on the handled path;
 * returns -EINVAL when not in MST mode or when the ESI read fails, in
 * which case MST mode is torn down and a hotplug event is sent.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the AUX write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while we were servicing. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3959
3960 /*
3961  * According to DP spec
3962  * 5.1.2:
3963  *  1. Read DPCD
3964  *  2. Configure link according to Receiver Capabilities
3965  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3966  *  4. Check link status on receipt of hot-plug interrupt
3967  */
3968 static void
3969 intel_dp_check_link_status(struct intel_dp *intel_dp)
3970 {
3971         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3972         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3973         u8 sink_irq_vector;
3974         u8 link_status[DP_LINK_STATUS_SIZE];
3975
3976         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3977
3978         if (!intel_encoder->connectors_active)
3979                 return;
3980
3981         if (WARN_ON(!intel_encoder->base.crtc))
3982                 return;
3983
3984         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3985                 return;
3986
3987         /* Try to read receiver status if the link appears to be up */
3988         if (!intel_dp_get_link_status(intel_dp, link_status)) {
3989                 return;
3990         }
3991
3992         /* Now read the DPCD to see if it's actually running */
3993         if (!intel_dp_get_dpcd(intel_dp)) {
3994                 return;
3995         }
3996
3997         /* Try to read the source of the interrupt */
3998         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3999             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4000                 /* Clear interrupt source */
4001                 drm_dp_dpcd_writeb(&intel_dp->aux,
4002                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4003                                    sink_irq_vector);
4004
4005                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4006                         intel_dp_handle_test_request(intel_dp);
4007                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4008                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4009         }
4010
4011         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4012                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4013                               intel_encoder->base.name);
4014                 intel_dp_start_link_train(intel_dp);
4015                 intel_dp_complete_link_train(intel_dp);
4016                 intel_dp_stop_link_train(intel_dp);
4017         }
4018 }
4019
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Classify the sink via DPCD alone.  For branch devices this follows the
 * DP spec's downstream-port rules: prefer the HPD-aware SINK_COUNT, then a
 * gentle DDC probe, then report "unknown" for port types that can't be
 * detected reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: detailed port type in DOWNSTREAM_PORT_0 */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse type in DOWNSTREAMPORT_PRESENT */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4069
4070 static enum drm_connector_status
4071 edp_detect(struct intel_dp *intel_dp)
4072 {
4073         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4074         enum drm_connector_status status;
4075
4076         status = intel_panel_detect(dev);
4077         if (status == connector_status_unknown)
4078                 status = connector_status_connected;
4079
4080         return status;
4081 }
4082
4083 static enum drm_connector_status
4084 ironlake_dp_detect(struct intel_dp *intel_dp)
4085 {
4086         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4087         struct drm_i915_private *dev_priv = dev->dev_private;
4088         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4089
4090         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4091                 return connector_status_disconnected;
4092
4093         return intel_dp_detect_dpcd(intel_dp);
4094 }
4095
4096 static int g4x_digital_port_connected(struct drm_device *dev,
4097                                        struct intel_digital_port *intel_dig_port)
4098 {
4099         struct drm_i915_private *dev_priv = dev->dev_private;
4100         uint32_t bit;
4101
4102         if (IS_VALLEYVIEW(dev)) {
4103                 switch (intel_dig_port->port) {
4104                 case PORT_B:
4105                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4106                         break;
4107                 case PORT_C:
4108                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4109                         break;
4110                 case PORT_D:
4111                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4112                         break;
4113                 default:
4114                         return -EINVAL;
4115                 }
4116         } else {
4117                 switch (intel_dig_port->port) {
4118                 case PORT_B:
4119                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4120                         break;
4121                 case PORT_C:
4122                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4123                         break;
4124                 case PORT_D:
4125                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4126                         break;
4127                 default:
4128                         return -EINVAL;
4129                 }
4130         }
4131
4132         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4133                 return 0;
4134         return 1;
4135 }
4136
4137 static enum drm_connector_status
4138 g4x_dp_detect(struct intel_dp *intel_dp)
4139 {
4140         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4141         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4142         int ret;
4143
4144         /* Can't disconnect eDP, but you can close the lid... */
4145         if (is_edp(intel_dp)) {
4146                 enum drm_connector_status status;
4147
4148                 status = intel_panel_detect(dev);
4149                 if (status == connector_status_unknown)
4150                         status = connector_status_connected;
4151                 return status;
4152         }
4153
4154         ret = g4x_digital_port_connected(dev, intel_dig_port);
4155         if (ret == -EINVAL)
4156                 return connector_status_unknown;
4157         else if (ret == 0)
4158                 return connector_status_disconnected;
4159
4160         return intel_dp_detect_dpcd(intel_dp);
4161 }
4162
4163 static struct edid *
4164 intel_dp_get_edid(struct intel_dp *intel_dp)
4165 {
4166         struct intel_connector *intel_connector = intel_dp->attached_connector;
4167
4168         /* use cached edid if we have one */
4169         if (intel_connector->edid) {
4170                 /* invalid edid */
4171                 if (IS_ERR(intel_connector->edid))
4172                         return NULL;
4173
4174                 return drm_edid_duplicate(intel_connector->edid);
4175         } else
4176                 return drm_get_edid(&intel_connector->base,
4177                                     &intel_dp->aux.ddc);
4178 }
4179
4180 static void
4181 intel_dp_set_edid(struct intel_dp *intel_dp)
4182 {
4183         struct intel_connector *intel_connector = intel_dp->attached_connector;
4184         struct edid *edid;
4185
4186         edid = intel_dp_get_edid(intel_dp);
4187         intel_connector->detect_edid = edid;
4188
4189         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4190                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4191         else
4192                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4193 }
4194
4195 static void
4196 intel_dp_unset_edid(struct intel_dp *intel_dp)
4197 {
4198         struct intel_connector *intel_connector = intel_dp->attached_connector;
4199
4200         kfree(intel_connector->detect_edid);
4201         intel_connector->detect_edid = NULL;
4202
4203         intel_dp->has_audio = false;
4204 }
4205
4206 static enum intel_display_power_domain
4207 intel_dp_power_get(struct intel_dp *dp)
4208 {
4209         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4210         enum intel_display_power_domain power_domain;
4211
4212         power_domain = intel_display_port_power_domain(encoder);
4213         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4214
4215         return power_domain;
4216 }
4217
4218 static void
4219 intel_dp_power_put(struct intel_dp *dp,
4220                    enum intel_display_power_domain power_domain)
4221 {
4222         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4223         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4224 }
4225
/*
 * DRM ->detect hook.  Picks the platform-appropriate probe (eDP, PCH-split
 * or G4X), then on a live link probes the sink OUI, MST capability, and
 * EDID.  MST links report disconnected here because their monitors are
 * exposed through separate MST connectors.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Power is needed for the AUX/DDC transactions that follow. */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4282
/*
 * DRM ->force hook: the user has forced the connector state, so refresh
 * the cached EDID for a forced-connected connector without performing a
 * full live detect.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* Power is needed for the DDC transaction in intel_dp_set_edid(). */
	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4306
4307 static int intel_dp_get_modes(struct drm_connector *connector)
4308 {
4309         struct intel_connector *intel_connector = to_intel_connector(connector);
4310         struct edid *edid;
4311
4312         edid = intel_connector->detect_edid;
4313         if (edid) {
4314                 int ret = intel_connector_update_modes(connector, edid);
4315                 if (ret)
4316                         return ret;
4317         }
4318
4319         /* if eDP has no EDID, fall back to fixed mode */
4320         if (is_edp(intel_attached_dp(connector)) &&
4321             intel_connector->panel.fixed_mode) {
4322                 struct drm_display_mode *mode;
4323
4324                 mode = drm_mode_duplicate(connector->dev,
4325                                           intel_connector->panel.fixed_mode);
4326                 if (mode) {
4327                         drm_mode_probed_add(connector, mode);
4328                         return 1;
4329                 }
4330         }
4331
4332         return 0;
4333 }
4334
4335 static bool
4336 intel_dp_detect_audio(struct drm_connector *connector)
4337 {
4338         bool has_audio = false;
4339         struct edid *edid;
4340
4341         edid = to_intel_connector(connector)->detect_edid;
4342         if (edid)
4343                 has_audio = drm_detect_monitor_audio(edid);
4344
4345         return has_audio;
4346 }
4347
/*
 * DRM ->set_property hook for DP connectors.  Handles the force-audio,
 * broadcast-RGB and (eDP-only) scaling-mode properties; any accepted
 * change that affects the pipe falls through to "done", which restores
 * the mode on the active crtc so the new setting takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the forced value didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Only restore the mode if the effective audio state changed. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when the effective range is unchanged. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4435
/*
 * DRM ->destroy hook: free the detect-time and cached EDIDs, tear down
 * the eDP panel state, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The cached EDID may be an ERR_PTR sentinel rather than NULL. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4454
/*
 * Encoder ->destroy hook: unregister the AUX channel, tear down MST
 * state, and for eDP make sure panel VDD is really off (cancelling the
 * delayed-off work first) before freeing the port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		/* Drop the reboot notifier registered at init time, if any. */
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4480
/*
 * Encoder suspend hook: for eDP, force panel VDD off synchronously so we
 * don't suspend with the delayed-off work still pending.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4497
/*
 * Reconcile driver state with a VDD bit the BIOS left enabled: take the
 * matching power-domain reference and schedule the normal delayed VDD
 * off.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4522
4523 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4524 {
4525         struct intel_dp *intel_dp;
4526
4527         if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4528                 return;
4529
4530         intel_dp = enc_to_intel_dp(encoder);
4531
4532         pps_lock(intel_dp);
4533
4534         /*
4535          * Read out the current power sequencer assignment,
4536          * in case the BIOS did something with it.
4537          */
4538         if (IS_VALLEYVIEW(encoder->dev))
4539                 vlv_initial_power_sequencer_setup(intel_dp);
4540
4541         intel_edp_panel_vdd_sanitize(intel_dp);
4542
4543         pps_unlock(intel_dp);
4544 }
4545
/* Connector ops shared by all SST DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
4556
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4562
/* Encoder ops for the DP encoder itself. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4567
/* Intentionally empty: DP needs no extra work from the hot_plug hook. */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4573
/*
 * HPD pulse handler for a DP digital port.
 *
 * Long pulses re-probe the port (live status, DPCD, OUI, MST capability);
 * short pulses service MST events or check SST link status.  Any failure
 * on the probe path jumps to "mst_fail", which tears down MST mode if it
 * was active.  A display power reference is held for the duration.
 *
 * Returns IRQ_HANDLED when the pulse was serviced, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {

		/* Confirm something is still physically connected. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4657
4658 /* Return which DP Port should be selected for Transcoder DP control */
4659 int
4660 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4661 {
4662         struct drm_device *dev = crtc->dev;
4663         struct intel_encoder *intel_encoder;
4664         struct intel_dp *intel_dp;
4665
4666         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4667                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4668
4669                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4670                     intel_encoder->type == INTEL_OUTPUT_EDP)
4671                         return intel_dp->output_reg;
4672         }
4673
4674         return -1;
4675 }
4676
4677 /* check the VBT to see whether the eDP is on DP-D port */
4678 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4679 {
4680         struct drm_i915_private *dev_priv = dev->dev_private;
4681         union child_device_config *p_child;
4682         int i;
4683         static const short port_mapping[] = {
4684                 [PORT_B] = PORT_IDPB,
4685                 [PORT_C] = PORT_IDPC,
4686                 [PORT_D] = PORT_IDPD,
4687         };
4688
4689         if (port == PORT_A)
4690                 return true;
4691
4692         if (!dev_priv->vbt.child_dev_num)
4693                 return false;
4694
4695         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4696                 p_child = dev_priv->vbt.child_dev + i;
4697
4698                 if (p_child->common.dvo_port == port_mapping[port] &&
4699                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4700                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4701                         return true;
4702         }
4703         return false;
4704 }
4705
4706 void
4707 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4708 {
4709         struct intel_connector *intel_connector = to_intel_connector(connector);
4710
4711         intel_attach_force_audio_property(connector);
4712         intel_attach_broadcast_rgb_property(connector);
4713         intel_dp->color_range_auto = true;
4714
4715         if (is_edp(intel_dp)) {
4716                 drm_mode_create_scaling_mode_property(connector->dev);
4717                 drm_object_attach_property(
4718                         &connector->base,
4719                         connector->dev->mode_config.scaling_mode_property,
4720                         DRM_MODE_SCALE_ASPECT);
4721                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4722         }
4723 }
4724
/*
 * Stamp all panel power sequencing timestamps with the current time,
 * presumably so the first delay wait is measured from driver init
 * rather than from jiffies 0 — TODO confirm against the wait helpers.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
        intel_dp->last_power_cycle = jiffies;
        intel_dp->last_power_on = jiffies;
        intel_dp->last_backlight_off = jiffies;
}
4731
/*
 * Determine the panel power sequencing delays for this panel and cache
 * them in intel_dp->pps_delays.  For each delay we take the max of the
 * value currently programmed in the PPS registers and the VBT value,
 * falling back to the eDP spec limits when both are zero.  Must be
 * called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div, pp;
        int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        /* Select the PPS register block: single PCH instance, or the
         * per-pipe instance picked by vlv_power_sequencer_pipe(). */
        if (HAS_PCH_SPLIT(dev)) {
                pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);

        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
        pp_div = I915_READ(pp_div_reg);

        /* Pull timing values out of registers (hw units of 100us, see
         * the spec-limit comment below) */
        cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                PANEL_POWER_UP_DELAY_SHIFT;

        cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                PANEL_LIGHT_ON_DELAY_SHIFT;

        cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                PANEL_LIGHT_OFF_DELAY_SHIFT;

        cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                PANEL_POWER_DOWN_DELAY_SHIFT;

        cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

        DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

        vbt = dev_priv->vbt.edp_pps;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert from hw units (100us) to ms, rounding up. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
4834
/*
 * Program the previously computed PPS delays (intel_dp->pps_delays)
 * into the panel power sequencer registers, together with the
 * reference clock divider and the port select bits.  Must be called
 * with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
        int pp_on_reg, pp_off_reg, pp_div_reg;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Pick the register block: single PCH instance vs. the per-pipe
         * instance chosen by vlv_power_sequencer_pipe(). */
        if (HAS_PCH_SPLIT(dev)) {
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
                pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }

        /*
         * And finally store the new values in the power sequencer. The
         * backlight delays are set to 1 because we do manual waits on them. For
         * T8, even BSpec recommends doing it. For T9, if we don't do this,
         * we'll end up waiting for the backlight off delay twice: once when we
         * do the manual sleep, and once when we disable the panel and wait for
         * the PP_STATUS bit to become zero.
         */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
        pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(pp_on_reg, pp_on);
        I915_WRITE(pp_off_reg, pp_off);
        I915_WRITE(pp_div_reg, pp_div);

        /* Read back so the debug output shows what the hw actually latched. */
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(pp_on_reg),
                      I915_READ(pp_off_reg),
                      I915_READ(pp_div_reg));
}
4900
4901 /**
4902  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4903  * @dev: DRM device
4904  * @refresh_rate: RR to be programmed
4905  *
4906  * This function gets called when refresh rate (RR) has to be changed from
4907  * one frequency to another. Switches can be between high and low RR
4908  * supported by the panel or to any other RR based on media playback (in
4909  * this case, RR value needs to be passed from user space).
4910  *
4911  * The caller of this function needs to take a lock on dev_priv->drrs.
4912  */
4913 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4914 {
4915         struct drm_i915_private *dev_priv = dev->dev_private;
4916         struct intel_encoder *encoder;
4917         struct intel_digital_port *dig_port = NULL;
4918         struct intel_dp *intel_dp = dev_priv->drrs.dp;
4919         struct intel_crtc_state *config = NULL;
4920         struct intel_crtc *intel_crtc = NULL;
4921         u32 reg, val;
4922         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4923
4924         if (refresh_rate <= 0) {
4925                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4926                 return;
4927         }
4928
4929         if (intel_dp == NULL) {
4930                 DRM_DEBUG_KMS("DRRS not supported.\n");
4931                 return;
4932         }
4933
4934         /*
4935          * FIXME: This needs proper synchronization with psr state for some
4936          * platforms that cannot have PSR and DRRS enabled at the same time.
4937          */
4938
4939         dig_port = dp_to_dig_port(intel_dp);
4940         encoder = &dig_port->base;
4941         intel_crtc = encoder->new_crtc;
4942
4943         if (!intel_crtc) {
4944                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4945                 return;
4946         }
4947
4948         config = intel_crtc->config;
4949
4950         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
4951                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4952                 return;
4953         }
4954
4955         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4956                         refresh_rate)
4957                 index = DRRS_LOW_RR;
4958
4959         if (index == dev_priv->drrs.refresh_rate_type) {
4960                 DRM_DEBUG_KMS(
4961                         "DRRS requested for previously set RR...ignoring\n");
4962                 return;
4963         }
4964
4965         if (!intel_crtc->active) {
4966                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4967                 return;
4968         }
4969
4970         if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4971                 switch (index) {
4972                 case DRRS_HIGH_RR:
4973                         intel_dp_set_m_n(intel_crtc, M1_N1);
4974                         break;
4975                 case DRRS_LOW_RR:
4976                         intel_dp_set_m_n(intel_crtc, M2_N2);
4977                         break;
4978                 case DRRS_MAX_RR:
4979                 default:
4980                         DRM_ERROR("Unsupported refreshrate type\n");
4981                 }
4982         } else if (INTEL_INFO(dev)->gen > 6) {
4983                 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4984                 val = I915_READ(reg);
4985
4986                 if (index > DRRS_HIGH_RR) {
4987                         if (IS_VALLEYVIEW(dev))
4988                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4989                         else
4990                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4991                 } else {
4992                         if (IS_VALLEYVIEW(dev))
4993                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4994                         else
4995                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4996                 }
4997                 I915_WRITE(reg, val);
4998         }
4999
5000         dev_priv->drrs.refresh_rate_type = index;
5001
5002         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5003 }
5004
5005 /**
5006  * intel_edp_drrs_enable - init drrs struct if supported
5007  * @intel_dp: DP struct
5008  *
5009  * Initializes frontbuffer_bits and drrs.dp
5010  */
5011 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5012 {
5013         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5014         struct drm_i915_private *dev_priv = dev->dev_private;
5015         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5016         struct drm_crtc *crtc = dig_port->base.base.crtc;
5017         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5018
5019         if (!intel_crtc->config->has_drrs) {
5020                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5021                 return;
5022         }
5023
5024         mutex_lock(&dev_priv->drrs.mutex);
5025         if (WARN_ON(dev_priv->drrs.dp)) {
5026                 DRM_ERROR("DRRS already enabled\n");
5027                 goto unlock;
5028         }
5029
5030         dev_priv->drrs.busy_frontbuffer_bits = 0;
5031
5032         dev_priv->drrs.dp = intel_dp;
5033
5034 unlock:
5035         mutex_unlock(&dev_priv->drrs.mutex);
5036 }
5037
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the fixed-mode refresh rate if currently downclocked,
 * detaches drrs.dp and cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        if (!intel_crtc->config->has_drrs)
                return;

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                /* Never enabled (or already disabled) — nothing to do. */
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        /* If we are downclocked, go back to the fixed mode's rate first. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                        intel_dp->attached_connector->panel.
                        fixed_mode->vrefresh);

        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);

        /* The work item takes drrs.mutex itself, so only cancel it
         * after dropping the mutex. */
        cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5070
5071 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5072 {
5073         struct drm_i915_private *dev_priv =
5074                 container_of(work, typeof(*dev_priv), drrs.work.work);
5075         struct intel_dp *intel_dp;
5076
5077         mutex_lock(&dev_priv->drrs.mutex);
5078
5079         intel_dp = dev_priv->drrs.dp;
5080
5081         if (!intel_dp)
5082                 goto unlock;
5083
5084         /*
5085          * The delayed work can race with an invalidate hence we need to
5086          * recheck.
5087          */
5088
5089         if (dev_priv->drrs.busy_frontbuffer_bits)
5090                 goto unlock;
5091
5092         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5093                 intel_dp_set_drrs_state(dev_priv->dev,
5094                         intel_dp->attached_connector->panel.
5095                         downclock_mode->vrefresh);
5096
5097 unlock:
5098
5099         mutex_unlock(&dev_priv->drrs.mutex);
5100 }
5101
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        /* NOTE(review): drrs.dp is tested without drrs.mutex held —
         * presumably benign, but confirm against the enable/disable paths. */
        if (!dev_priv->drrs.dp)
                return;

        /* Cancel pending downclock work before taking the mutex; the
         * work item itself acquires drrs.mutex. */
        cancel_delayed_work_sync(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Screen activity: go back to the fixed (high) refresh rate. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
                intel_dp_set_drrs_state(dev_priv->dev,
                                dev_priv->drrs.dp->attached_connector->panel.
                                fixed_mode->vrefresh);
        }

        /* Only the bits belonging to the DRRS pipe count as busy. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

        dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
        mutex_unlock(&dev_priv->drrs.mutex);
}
5140
/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        /* NOTE(review): unlocked check of drrs.dp, same pattern as
         * intel_edp_drrs_invalidate() — confirm it is race-free. */
        if (!dev_priv->drrs.dp)
                return;

        /* Cancel before taking the mutex; the work item acquires it. */
        cancel_delayed_work_sync(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* Re-arm the downclock work once nothing is busy and we are not
         * already running at the low refresh rate. */
        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
                        !dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
}
5175
5176 /**
5177  * DOC: Display Refresh Rate Switching (DRRS)
5178  *
5179  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5181  * dynamically, based on the usage scenario. This feature is applicable
5182  * for internal panels.
5183  *
5184  * Indication that the panel supports DRRS is given by the panel EDID, which
5185  * would list multiple refresh rates for one resolution.
5186  *
5187  * DRRS is of 2 types - static and seamless.
5188  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5189  * (may appear as a blink on screen) and is used in dock-undock scenario.
5190  * Seamless DRRS involves changing RR without any visual effect to the user
5191  * and can be used during normal system usage. This is done by programming
5192  * certain registers.
5193  *
5194  * Support for static/seamless DRRS may be indicated in the VBT based on
5195  * inputs from the panel spec.
5196  *
5197  * DRRS saves power by switching to low RR based on usage scenarios.
5198  *
5199  * eDP DRRS:-
5200  *        The implementation is based on frontbuffer tracking implementation.
5201  * When there is a disturbance on the screen triggered by user activity or a
5202  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5203  * When there is no movement on screen, after a timeout of 1 second, a switch
5204  * to low RR is made.
5205  *        For integration with frontbuffer tracking code,
5206  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5207  *
5208  * DRRS can be further extended to support other internal panels and also
5209  * the scenario of video playback wherein RR is set based on the rate
5210  * requested by userspace.
5211  */
5212
5213 /**
5214  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5215  * @intel_connector: eDP connector
5216  * @fixed_mode: preferred mode of panel
5217  *
 * This function is called only once at driver load to initialize basic
5219  * DRRS stuff.
5220  *
5221  * Returns:
5222  * Downclock mode if panel supports it, else return NULL.
5223  * DRRS support is determined by the presence of downclock mode (apart
5224  * from VBT setting).
5225  */
5226 static struct drm_display_mode *
5227 intel_dp_drrs_init(struct intel_connector *intel_connector,
5228                 struct drm_display_mode *fixed_mode)
5229 {
5230         struct drm_connector *connector = &intel_connector->base;
5231         struct drm_device *dev = connector->dev;
5232         struct drm_i915_private *dev_priv = dev->dev_private;
5233         struct drm_display_mode *downclock_mode = NULL;
5234
5235         if (INTEL_INFO(dev)->gen <= 6) {
5236                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5237                 return NULL;
5238         }
5239
5240         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5241                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5242                 return NULL;
5243         }
5244
5245         downclock_mode = intel_find_panel_downclock
5246                                         (dev, fixed_mode, connector);
5247
5248         if (!downclock_mode) {
5249                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5250                 return NULL;
5251         }
5252
5253         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5254
5255         mutex_init(&dev_priv->drrs.mutex);
5256
5257         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5258
5259         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5260         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5261         return downclock_mode;
5262 }
5263
/*
 * eDP-specific connector init: sanitize the VDD state, cache the
 * panel's DPCD and EDID, program the power sequencer registers, pick a
 * fixed mode (EDID preferred, VBT fallback), and set up DRRS plus the
 * backlight.  Returns false when the panel looks like a ghost (DPCD
 * read fails); true otherwise, including the non-eDP no-op case.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        dev_priv->drrs.type = DRRS_NOT_SUPPORTED;

        /* Everything below is eDP-only. */
        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* EDID present but unusable. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        /* Cached ERR_PTR distinguishes missing (-ENOENT) from invalid
         * (-EINVAL) EDID for later users. */
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        /* DRRS setup needs the fixed mode to find a
                         * downclocked variant of it. */
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight_power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
5372
5373 bool
5374 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5375                         struct intel_connector *intel_connector)
5376 {
5377         struct drm_connector *connector = &intel_connector->base;
5378         struct intel_dp *intel_dp = &intel_dig_port->dp;
5379         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5380         struct drm_device *dev = intel_encoder->base.dev;
5381         struct drm_i915_private *dev_priv = dev->dev_private;
5382         enum port port = intel_dig_port->port;
5383         int type;
5384
5385         intel_dp->pps_pipe = INVALID_PIPE;
5386
5387         /* intel_dp vfuncs */
5388         if (INTEL_INFO(dev)->gen >= 9)
5389                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5390         else if (IS_VALLEYVIEW(dev))
5391                 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5392         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5393                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5394         else if (HAS_PCH_SPLIT(dev))
5395                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5396         else
5397                 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5398
5399         if (INTEL_INFO(dev)->gen >= 9)
5400                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5401         else
5402                 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5403
5404         /* Preserve the current hw state. */
5405         intel_dp->DP = I915_READ(intel_dp->output_reg);
5406         intel_dp->attached_connector = intel_connector;
5407
5408         if (intel_dp_is_edp(dev, port))
5409                 type = DRM_MODE_CONNECTOR_eDP;
5410         else
5411                 type = DRM_MODE_CONNECTOR_DisplayPort;
5412
5413         /*
5414          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5415          * for DP the encoder type can be set by the caller to
5416          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5417          */
5418         if (type == DRM_MODE_CONNECTOR_eDP)
5419                 intel_encoder->type = INTEL_OUTPUT_EDP;
5420
5421         /* eDP only on port B and/or C on vlv/chv */
5422         if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5423                     port != PORT_B && port != PORT_C))
5424                 return false;
5425
5426         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5427                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5428                         port_name(port));
5429
5430         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5431         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5432
5433         connector->interlace_allowed = true;
5434         connector->doublescan_allowed = 0;
5435
5436         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5437                           edp_panel_vdd_work);
5438
5439         intel_connector_attach_encoder(intel_connector, intel_encoder);
5440         drm_connector_register(connector);
5441
5442         if (HAS_DDI(dev))
5443                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5444         else
5445                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5446         intel_connector->unregister = intel_dp_connector_unregister;
5447
5448         /* Set up the hotplug pin. */
5449         switch (port) {
5450         case PORT_A:
5451                 intel_encoder->hpd_pin = HPD_PORT_A;
5452                 break;
5453         case PORT_B:
5454                 intel_encoder->hpd_pin = HPD_PORT_B;
5455                 break;
5456         case PORT_C:
5457                 intel_encoder->hpd_pin = HPD_PORT_C;
5458                 break;
5459         case PORT_D:
5460                 intel_encoder->hpd_pin = HPD_PORT_D;
5461                 break;
5462         default:
5463                 BUG();
5464         }
5465
5466         if (is_edp(intel_dp)) {
5467                 pps_lock(intel_dp);
5468                 intel_dp_init_panel_power_timestamps(intel_dp);
5469                 if (IS_VALLEYVIEW(dev))
5470                         vlv_initial_power_sequencer_setup(intel_dp);
5471                 else
5472                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
5473                 pps_unlock(intel_dp);
5474         }
5475
5476         intel_dp_aux_init(intel_dp, intel_connector);
5477
5478         /* init MST on ports that can support it */
5479         if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5480                 if (port == PORT_B || port == PORT_C || port == PORT_D) {
5481                         intel_dp_mst_encoder_init(intel_dig_port,
5482                                                   intel_connector->base.base.id);
5483                 }
5484         }
5485
5486         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5487                 drm_dp_aux_unregister(&intel_dp->aux);
5488                 if (is_edp(intel_dp)) {
5489                         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5490                         /*
5491                          * vdd might still be enabled do to the delayed vdd off.
5492                          * Make sure vdd is actually turned off here.
5493                          */
5494                         pps_lock(intel_dp);
5495                         edp_panel_vdd_off_sync(intel_dp);
5496                         pps_unlock(intel_dp);
5497                 }
5498                 drm_connector_unregister(connector);
5499                 drm_connector_cleanup(connector);
5500                 return false;
5501         }
5502
5503         intel_dp_add_properties(intel_dp, connector);
5504
5505         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5506          * 0xd.  Failure to do so will result in spurious interrupts being
5507          * generated on the port when a cable is not attached.
5508          */
5509         if (IS_G4X(dev) && !IS_GM45(dev)) {
5510                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5511                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5512         }
5513
5514         return true;
5515 }
5516
5517 void
5518 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5519 {
5520         struct drm_i915_private *dev_priv = dev->dev_private;
5521         struct intel_digital_port *intel_dig_port;
5522         struct intel_encoder *intel_encoder;
5523         struct drm_encoder *encoder;
5524         struct intel_connector *intel_connector;
5525
5526         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5527         if (!intel_dig_port)
5528                 return;
5529
5530         intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
5531         if (!intel_connector) {
5532                 kfree(intel_dig_port);
5533                 return;
5534         }
5535
5536         intel_encoder = &intel_dig_port->base;
5537         encoder = &intel_encoder->base;
5538
5539         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5540                          DRM_MODE_ENCODER_TMDS);
5541
5542         intel_encoder->compute_config = intel_dp_compute_config;
5543         intel_encoder->disable = intel_disable_dp;
5544         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5545         intel_encoder->get_config = intel_dp_get_config;
5546         intel_encoder->suspend = intel_dp_encoder_suspend;
5547         if (IS_CHERRYVIEW(dev)) {
5548                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5549                 intel_encoder->pre_enable = chv_pre_enable_dp;
5550                 intel_encoder->enable = vlv_enable_dp;
5551                 intel_encoder->post_disable = chv_post_disable_dp;
5552         } else if (IS_VALLEYVIEW(dev)) {
5553                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5554                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5555                 intel_encoder->enable = vlv_enable_dp;
5556                 intel_encoder->post_disable = vlv_post_disable_dp;
5557         } else {
5558                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5559                 intel_encoder->enable = g4x_enable_dp;
5560                 if (INTEL_INFO(dev)->gen >= 5)
5561                         intel_encoder->post_disable = ilk_post_disable_dp;
5562         }
5563
5564         intel_dig_port->port = port;
5565         intel_dig_port->dp.output_reg = output_reg;
5566
5567         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5568         if (IS_CHERRYVIEW(dev)) {
5569                 if (port == PORT_D)
5570                         intel_encoder->crtc_mask = 1 << 2;
5571                 else
5572                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5573         } else {
5574                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5575         }
5576         intel_encoder->cloneable = 0;
5577         intel_encoder->hot_plug = intel_dp_hot_plug;
5578
5579         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5580         dev_priv->hpd_irq_port[port] = intel_dig_port;
5581
5582         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5583                 drm_encoder_cleanup(encoder);
5584                 kfree(intel_dig_port);
5585                 kfree(intel_connector);
5586         }
5587 }
5588
5589 void intel_dp_mst_suspend(struct drm_device *dev)
5590 {
5591         struct drm_i915_private *dev_priv = dev->dev_private;
5592         int i;
5593
5594         /* disable MST */
5595         for (i = 0; i < I915_MAX_PORTS; i++) {
5596                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5597                 if (!intel_dig_port)
5598                         continue;
5599
5600                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5601                         if (!intel_dig_port->dp.can_mst)
5602                                 continue;
5603                         if (intel_dig_port->dp.is_mst)
5604                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5605                 }
5606         }
5607 }
5608
5609 void intel_dp_mst_resume(struct drm_device *dev)
5610 {
5611         struct drm_i915_private *dev_priv = dev->dev_private;
5612         int i;
5613
5614         for (i = 0; i < I915_MAX_PORTS; i++) {
5615                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5616                 if (!intel_dig_port)
5617                         continue;
5618                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5619                         int ret;
5620
5621                         if (!intel_dig_port->dp.can_mst)
5622                                 continue;
5623
5624                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5625                         if (ret != 0) {
5626                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5627                         }
5628                 }
5629         }
5630 }