[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c — Intel DisplayPort output support
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Pairing of a DP link-rate code (DP_LINK_BW_*) with the DPLL divider
 * values that generate the matching link clock on a given platform.
 */
struct dp_link_dpll {
        int link_bw;            /* DPCD link-rate code, e.g. DP_LINK_BW_2_7 */
        struct dpll dpll;       /* divider settings producing that rate */
};

/* DPLL settings per link rate for gen4 (G4x) DP ports. */
static const struct dp_link_dpll gen4_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings per link rate for PCH-attached DP ports. */
static const struct dp_link_dpll pch_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { DP_LINK_BW_2_7,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings per link rate for Valleyview DP ports. */
static const struct dp_link_dpll vlv_dpll[] = {
        { DP_LINK_BW_1_62,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { DP_LINK_BW_2_7,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that has more link rates.
 * Below only provides the fixed rates but excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { DP_LINK_BW_1_62,      /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { DP_LINK_BW_2_7,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { DP_LINK_BW_5_4,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Supported DP link rates in kHz, per platform. */
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
                                 243000, 270000, 324000, 405000,
                                 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
100
101 /**
102  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103  * @intel_dp: DP struct
104  *
105  * If a CPU or PCH DP output is attached to an eDP panel, this function
106  * will return true, and false otherwise.
107  */
108 static bool is_edp(struct intel_dp *intel_dp)
109 {
110         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111
112         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
113 }
114
115 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 {
117         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118
119         return intel_dig_port->base.base.dev;
120 }
121
122 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 {
124         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
125 }
126
127 static void intel_dp_link_down(struct intel_dp *intel_dp);
128 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
129 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
130 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
131 static void vlv_steal_power_sequencer(struct drm_device *dev,
132                                       enum pipe pipe);
133
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138
139         switch (max_link_bw) {
140         case DP_LINK_BW_1_62:
141         case DP_LINK_BW_2_7:
142         case DP_LINK_BW_5_4:
143                 break;
144         default:
145                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146                      max_link_bw);
147                 max_link_bw = DP_LINK_BW_1_62;
148                 break;
149         }
150         return max_link_bw;
151 }
152
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156         struct drm_device *dev = intel_dig_port->base.base.dev;
157         u8 source_max, sink_max;
158
159         source_max = 4;
160         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162                 source_max = 2;
163
164         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165
166         return min(source_max, sink_max);
167 }
168
169 /*
170  * The units on the numbers in the next two are... bizarre.  Examples will
171  * make it clearer; this one parallels an example in the eDP spec.
172  *
173  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174  *
175  *     270000 * 1 * 8 / 10 == 216000
176  *
177  * The actual data capacity of that configuration is 2.16Gbit/s, so the
178  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
179  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180  * 119000.  At 18bpp that's 2142000 kilobits per second.
181  *
182  * Thus the strange-looking division by 10 in intel_dp_link_required, to
183  * get the result in decakilobits instead of kilobits.
184  */
185
/*
 * Bandwidth needed by a mode, in decakilobits/s: ceil(clock * bpp / 10).
 * See the block comment above for why the units are decakilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        int kilobits = pixel_clock * bpp;

        /* +9 rounds the division up instead of truncating. */
        return (kilobits + 9) / 10;
}
191
/*
 * Max payload capacity of a link, in decakilobits/s: 8b/10b encoding
 * means only 8 of every 10 bits on the wire carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        int link_rate = max_link_clock * max_lanes;

        return link_rate * 8 / 10;
}
197
198 static enum drm_mode_status
199 intel_dp_mode_valid(struct drm_connector *connector,
200                     struct drm_display_mode *mode)
201 {
202         struct intel_dp *intel_dp = intel_attached_dp(connector);
203         struct intel_connector *intel_connector = to_intel_connector(connector);
204         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
205         int target_clock = mode->clock;
206         int max_rate, mode_rate, max_lanes, max_link_clock;
207
208         if (is_edp(intel_dp) && fixed_mode) {
209                 if (mode->hdisplay > fixed_mode->hdisplay)
210                         return MODE_PANEL;
211
212                 if (mode->vdisplay > fixed_mode->vdisplay)
213                         return MODE_PANEL;
214
215                 target_clock = fixed_mode->clock;
216         }
217
218         max_link_clock = intel_dp_max_link_rate(intel_dp);
219         max_lanes = intel_dp_max_lane_count(intel_dp);
220
221         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222         mode_rate = intel_dp_link_required(target_clock, 18);
223
224         if (mode_rate > max_rate)
225                 return MODE_CLOCK_HIGH;
226
227         if (mode->clock < 10000)
228                 return MODE_CLOCK_LOW;
229
230         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231                 return MODE_H_ILLEGAL;
232
233         return MODE_OK;
234 }
235
/*
 * Pack up to 4 bytes of an AUX message into a register word,
 * MSB-first: src[0] lands in bits 31:24.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int byte;

        if (src_bytes > 4)
                src_bytes = 4;

        for (byte = 0; byte < src_bytes; byte++)
                v |= (uint32_t)src[byte] << (24 - byte * 8);

        return v;
}
247
/*
 * Unpack a register word back into up to 4 AUX message bytes,
 * MSB-first: bits 31:24 become dst[0]. Inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int byte;

        if (dst_bytes > 4)
                dst_bytes = 4;

        for (byte = 0; byte < dst_bytes; byte++)
                dst[byte] = (uint8_t)(src >> (24 - byte * 8));
}
256
257 /* hrawclock is 1/4 the FSB frequency */
258 static int
259 intel_hrawclk(struct drm_device *dev)
260 {
261         struct drm_i915_private *dev_priv = dev->dev_private;
262         uint32_t clkcfg;
263
264         /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265         if (IS_VALLEYVIEW(dev))
266                 return 200;
267
268         clkcfg = I915_READ(CLKCFG);
269         switch (clkcfg & CLKCFG_FSB_MASK) {
270         case CLKCFG_FSB_400:
271                 return 100;
272         case CLKCFG_FSB_533:
273                 return 133;
274         case CLKCFG_FSB_667:
275                 return 166;
276         case CLKCFG_FSB_800:
277                 return 200;
278         case CLKCFG_FSB_1067:
279                 return 266;
280         case CLKCFG_FSB_1333:
281                 return 333;
282         /* these two are just a guess; one of them might be right */
283         case CLKCFG_FSB_1600:
284         case CLKCFG_FSB_1600_ALT:
285                 return 400;
286         default:
287                 return 133;
288         }
289 }
290
291 static void
292 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
293                                     struct intel_dp *intel_dp);
294 static void
295 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
296                                               struct intel_dp *intel_dp);
297
/*
 * Acquire the panel power sequencer (PPS) mutex for @intel_dp.
 *
 * The display power domain reference is taken *before* pps_mutex and
 * must be released *after* it (see pps_unlock()); taking it while
 * holding pps_mutex would deadlock against the power_domain mutex —
 * see the comment in vlv_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
315
/*
 * Release the PPS mutex and drop the power domain reference taken in
 * pps_lock(), in the reverse order of acquisition.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}
329
/*
 * Make the power sequencer on intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the port with a minimal configuration.
 * Without this trick even the VDD force bit has no effect. Bails out
 * (with a warning) if the port is already enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled;
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        /* Minimal config: lowest drive settings, 1 lane, training pattern 1. */
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable temporarily it if it's not already enabled.
         */
        if (!pll_enabled)
                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        /* Restore the original PLL state if we had to force it on. */
        if (!pll_enabled)
                vlv_force_pll_off(dev, pipe);
}
389
/*
 * Return the pipe whose power sequencer this (eDP) port should use,
 * assigning and initializing one on first use: pick a pipe not claimed
 * by any other eDP port, steal it from non-port users, program the PPS
 * registers and kick the sequencer so it locks onto the port.
 *
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        /* Bitmask of candidate pipes; VLV PPS only exists on pipes A and B. */
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
453
454 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
455                                enum pipe pipe);
456
457 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458                                enum pipe pipe)
459 {
460         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461 }
462
463 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464                                 enum pipe pipe)
465 {
466         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467 }
468
/* pipe_check: matches any pipe — catch-all for vlv_initial_pps_pipe(). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
474
475 static enum pipe
476 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477                      enum port port,
478                      vlv_pipe_check pipe_check)
479 {
480         enum pipe pipe;
481
482         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484                         PANEL_PORT_SELECT_MASK;
485
486                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
487                         continue;
488
489                 if (!pipe_check(dev_priv, pipe))
490                         continue;
491
492                 return pipe;
493         }
494
495         return INVALID_PIPE;
496 }
497
/*
 * At init time, try to adopt the power sequencer the BIOS left assigned
 * to this port. Preference order: a pipe with panel power on, then one
 * with VDD forced on, then any pipe merely selecting this port. If none
 * matches, leave pps_pipe INVALID and let vlv_power_sequencer_pipe()
 * pick one lazily later.
 *
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
534
/*
 * Forget all pipe->port power sequencer assignments (VLV only), e.g.
 * after a power-well cycle invalidates the hardware state. Intentionally
 * runs without pps_mutex — see the lock-ordering comment below.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}
563
564 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565 {
566         struct drm_device *dev = intel_dp_to_dev(intel_dp);
567
568         if (HAS_PCH_SPLIT(dev))
569                 return PCH_PP_CONTROL;
570         else
571                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
572 }
573
574 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
575 {
576         struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
578         if (HAS_PCH_SPLIT(dev))
579                 return PCH_PP_STATUS;
580         else
581                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
582 }
583
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function is only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_div;
        u32 pp_ctrl_reg, pp_div_reg;

        /* Only act on restart of a machine with an eDP panel. */
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                /* Sleep long enough to satisfy the panel's T12 power-cycle delay. */
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}
619
620 static bool edp_have_panel_power(struct intel_dp *intel_dp)
621 {
622         struct drm_device *dev = intel_dp_to_dev(intel_dp);
623         struct drm_i915_private *dev_priv = dev->dev_private;
624
625         lockdep_assert_held(&dev_priv->pps_mutex);
626
627         if (IS_VALLEYVIEW(dev) &&
628             intel_dp->pps_pipe == INVALID_PIPE)
629                 return false;
630
631         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
632 }
633
634 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
635 {
636         struct drm_device *dev = intel_dp_to_dev(intel_dp);
637         struct drm_i915_private *dev_priv = dev->dev_private;
638
639         lockdep_assert_held(&dev_priv->pps_mutex);
640
641         if (IS_VALLEYVIEW(dev) &&
642             intel_dp->pps_pipe == INVALID_PIPE)
643                 return false;
644
645         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
646 }
647
/*
 * Sanity check before AUX transfers on eDP: warn loudly if neither
 * panel power nor VDD force is on, since AUX will fail in that state.
 * No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}
664
/*
 * Wait (up to 10 ms) for the AUX channel to go idle, either via the
 * AUX-done interrupt or by polling. Returns the final AUX_CH_CTL value;
 * the SEND_BUSY bit may still be set if the hardware never signalled.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

/* Condition macro: re-reads AUX_CH_CTL into 'status' on every evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}
688
689 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
690 {
691         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692         struct drm_device *dev = intel_dig_port->base.base.dev;
693
694         /*
695          * The clock divider is based off the hrawclk, and would like to run at
696          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
697          */
698         return index ? 0 : intel_hrawclk(dev) / 2;
699 }
700
701 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702 {
703         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704         struct drm_device *dev = intel_dig_port->base.base.dev;
705         struct drm_i915_private *dev_priv = dev->dev_private;
706
707         if (index)
708                 return 0;
709
710         if (intel_dig_port->port == PORT_A) {
711                 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
712         } else {
713                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714         }
715 }
716
717 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718 {
719         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720         struct drm_device *dev = intel_dig_port->base.base.dev;
721         struct drm_i915_private *dev_priv = dev->dev_private;
722
723         if (intel_dig_port->port == PORT_A) {
724                 if (index)
725                         return 0;
726                 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
727         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728                 /* Workaround for non-ULT HSW */
729                 switch (index) {
730                 case 0: return 63;
731                 case 1: return 72;
732                 default: return 0;
733                 }
734         } else  {
735                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
736         }
737 }
738
/* VLV uses one fixed AUX clock divider (100); only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        if (index)
                return 0;

        return 100;
}
743
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug-in into the existing code.
         */
        if (index)
                return 0;

        return 1;
}
753
754 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
755                                       bool has_aux_irq,
756                                       int send_bytes,
757                                       uint32_t aux_clock_divider)
758 {
759         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760         struct drm_device *dev = intel_dig_port->base.base.dev;
761         uint32_t precharge, timeout;
762
763         if (IS_GEN6(dev))
764                 precharge = 3;
765         else
766                 precharge = 5;
767
768         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770         else
771                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772
773         return DP_AUX_CH_CTL_SEND_BUSY |
774                DP_AUX_CH_CTL_DONE |
775                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
776                DP_AUX_CH_CTL_TIME_OUT_ERROR |
777                timeout |
778                DP_AUX_CH_CTL_RECEIVE_ERROR |
779                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
781                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
782 }
783
784 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785                                       bool has_aux_irq,
786                                       int send_bytes,
787                                       uint32_t unused)
788 {
789         return DP_AUX_CH_CTL_SEND_BUSY |
790                DP_AUX_CH_CTL_DONE |
791                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792                DP_AUX_CH_CTL_TIME_OUT_ERROR |
793                DP_AUX_CH_CTL_TIME_OUT_1600us |
794                DP_AUX_CH_CTL_RECEIVE_ERROR |
795                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797 }
798
799 static int
800 intel_dp_aux_ch(struct intel_dp *intel_dp,
801                 const uint8_t *send, int send_bytes,
802                 uint8_t *recv, int recv_size)
803 {
804         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805         struct drm_device *dev = intel_dig_port->base.base.dev;
806         struct drm_i915_private *dev_priv = dev->dev_private;
807         uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808         uint32_t ch_data = ch_ctl + 4;
809         uint32_t aux_clock_divider;
810         int i, ret, recv_bytes;
811         uint32_t status;
812         int try, clock = 0;
813         bool has_aux_irq = HAS_AUX_IRQ(dev);
814         bool vdd;
815
816         pps_lock(intel_dp);
817
818         /*
819          * We will be called with VDD already enabled for dpcd/edid/oui reads.
820          * In such cases we want to leave VDD enabled and it's up to upper layers
821          * to turn it off. But for eg. i2c-dev access we need to turn it on/off
822          * ourselves.
823          */
824         vdd = edp_panel_vdd_on(intel_dp);
825
826         /* dp aux is extremely sensitive to irq latency, hence request the
827          * lowest possible wakeup latency and so prevent the cpu from going into
828          * deep sleep states.
829          */
830         pm_qos_update_request(&dev_priv->pm_qos, 0);
831
832         intel_dp_check_edp(intel_dp);
833
834         intel_aux_display_runtime_get(dev_priv);
835
836         /* Try to wait for any previous AUX channel activity */
837         for (try = 0; try < 3; try++) {
838                 status = I915_READ_NOTRACE(ch_ctl);
839                 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
840                         break;
841                 msleep(1);
842         }
843
844         if (try == 3) {
845                 WARN(1, "dp_aux_ch not started status 0x%08x\n",
846                      I915_READ(ch_ctl));
847                 ret = -EBUSY;
848                 goto out;
849         }
850
851         /* Only 5 data registers! */
852         if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853                 ret = -E2BIG;
854                 goto out;
855         }
856
857         while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
858                 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859                                                           has_aux_irq,
860                                                           send_bytes,
861                                                           aux_clock_divider);
862
863                 /* Must try at least 3 times according to DP spec */
864                 for (try = 0; try < 5; try++) {
865                         /* Load the send data into the aux channel data registers */
866                         for (i = 0; i < send_bytes; i += 4)
867                                 I915_WRITE(ch_data + i,
868                                            intel_dp_pack_aux(send + i,
869                                                              send_bytes - i));
870
871                         /* Send the command and wait for it to complete */
872                         I915_WRITE(ch_ctl, send_ctl);
873
874                         status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
875
876                         /* Clear done status and any errors */
877                         I915_WRITE(ch_ctl,
878                                    status |
879                                    DP_AUX_CH_CTL_DONE |
880                                    DP_AUX_CH_CTL_TIME_OUT_ERROR |
881                                    DP_AUX_CH_CTL_RECEIVE_ERROR);
882
883                         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
884                                 continue;
885
886                         /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887                          *   400us delay required for errors and timeouts
888                          *   Timeout errors from the HW already meet this
889                          *   requirement so skip to next iteration
890                          */
891                         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892                                 usleep_range(400, 500);
893                                 continue;
894                         }
895                         if (status & DP_AUX_CH_CTL_DONE)
896                                 goto done;
897                 }
898         }
899
900         if ((status & DP_AUX_CH_CTL_DONE) == 0) {
901                 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
902                 ret = -EBUSY;
903                 goto out;
904         }
905
906 done:
907         /* Check for timeout or receive error.
908          * Timeouts occur when the sink is not connected
909          */
910         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
911                 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
912                 ret = -EIO;
913                 goto out;
914         }
915
916         /* Timeouts occur when the device isn't connected, so they're
917          * "normal" -- don't fill the kernel log with these */
918         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
919                 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
920                 ret = -ETIMEDOUT;
921                 goto out;
922         }
923
924         /* Unload any bytes sent back from the other side */
925         recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
926                       DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
927         if (recv_bytes > recv_size)
928                 recv_bytes = recv_size;
929
930         for (i = 0; i < recv_bytes; i += 4)
931                 intel_dp_unpack_aux(I915_READ(ch_data + i),
932                                     recv + i, recv_bytes - i);
933
934         ret = recv_bytes;
935 out:
936         pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
937         intel_aux_display_runtime_put(dev_priv);
938
939         if (vdd)
940                 edp_panel_vdd_off(intel_dp, false);
941
942         pps_unlock(intel_dp);
943
944         return ret;
945 }
946
/* An AUX request header: 3 command/address bytes plus one length byte. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer() hook: marshal a drm_dp_aux_msg into the raw byte
 * stream the AUX channel expects, run the transaction through
 * intel_dp_aux_ch() and decode the reply.
 *
 * Returns the payload size transferred on success, or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];	/* HW has a 20-byte data window */
	size_t txsize, rxsize;
	int ret;

	/* Pack the 4-byte header: request nibble + 20-bit address + len-1 */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-size message is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte carries the AUX reply code. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* +1 for the reply-code byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1017
/*
 * Set up the AUX channel for a DP port: pick the AUX_CTL register for the
 * port, register the drm_dp_aux helper and create the sysfs link from the
 * connector to its i2c-over-AUX DDC adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	/* Default to the per-port PCH AUX registers; fixed up below for
	 * platforms where AUX lives next to the DP port register instead. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();	/* no other ports carry DP AUX */
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC adapter under the connector's sysfs node; on
	 * failure undo the aux registration rather than leaving a
	 * half-initialized channel behind. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1082
/*
 * Connector unregister hook: remove the sysfs DDC link created by
 * intel_dp_aux_init() (MST connectors never had one created) and run the
 * common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1093
1094 static void
1095 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1096 {
1097         u32 ctrl1;
1098
1099         memset(&pipe_config->dpll_hw_state, 0,
1100                sizeof(pipe_config->dpll_hw_state));
1101
1102         pipe_config->ddi_pll_sel = SKL_DPLL0;
1103         pipe_config->dpll_hw_state.cfgcr1 = 0;
1104         pipe_config->dpll_hw_state.cfgcr2 = 0;
1105
1106         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1107         switch (link_clock / 2) {
1108         case 81000:
1109                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1110                                               SKL_DPLL0);
1111                 break;
1112         case 135000:
1113                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1114                                               SKL_DPLL0);
1115                 break;
1116         case 270000:
1117                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1118                                               SKL_DPLL0);
1119                 break;
1120         case 162000:
1121                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1122                                               SKL_DPLL0);
1123                 break;
1124         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1125         results in CDCLK change. Need to handle the change of CDCLK by
1126         disabling pipes and re-enabling them */
1127         case 108000:
1128                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1129                                               SKL_DPLL0);
1130                 break;
1131         case 216000:
1132                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1133                                               SKL_DPLL0);
1134                 break;
1135
1136         }
1137         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1138 }
1139
1140 static void
1141 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1142 {
1143         memset(&pipe_config->dpll_hw_state, 0,
1144                sizeof(pipe_config->dpll_hw_state));
1145
1146         switch (link_bw) {
1147         case DP_LINK_BW_1_62:
1148                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1149                 break;
1150         case DP_LINK_BW_2_7:
1151                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1152                 break;
1153         case DP_LINK_BW_5_4:
1154                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1155                 break;
1156         }
1157 }
1158
1159 static int
1160 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1161 {
1162         if (intel_dp->num_sink_rates) {
1163                 *sink_rates = intel_dp->sink_rates;
1164                 return intel_dp->num_sink_rates;
1165         }
1166
1167         *sink_rates = default_rates;
1168
1169         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1170 }
1171
1172 static int
1173 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1174 {
1175         if (IS_SKYLAKE(dev)) {
1176                 *source_rates = skl_rates;
1177                 return ARRAY_SIZE(skl_rates);
1178         } else if (IS_CHERRYVIEW(dev)) {
1179                 *source_rates = chv_rates;
1180                 return ARRAY_SIZE(chv_rates);
1181         }
1182
1183         *source_rates = default_rates;
1184
1185         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1186                 /* WaDisableHBR2:skl */
1187                 return (DP_LINK_BW_2_7 >> 3) + 1;
1188         else if (INTEL_INFO(dev)->gen >= 8 ||
1189             (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1190                 return (DP_LINK_BW_5_4 >> 3) + 1;
1191         else
1192                 return (DP_LINK_BW_2_7 >> 3) + 1;
1193 }
1194
1195 static void
1196 intel_dp_set_clock(struct intel_encoder *encoder,
1197                    struct intel_crtc_state *pipe_config, int link_bw)
1198 {
1199         struct drm_device *dev = encoder->base.dev;
1200         const struct dp_link_dpll *divisor = NULL;
1201         int i, count = 0;
1202
1203         if (IS_G4X(dev)) {
1204                 divisor = gen4_dpll;
1205                 count = ARRAY_SIZE(gen4_dpll);
1206         } else if (HAS_PCH_SPLIT(dev)) {
1207                 divisor = pch_dpll;
1208                 count = ARRAY_SIZE(pch_dpll);
1209         } else if (IS_CHERRYVIEW(dev)) {
1210                 divisor = chv_dpll;
1211                 count = ARRAY_SIZE(chv_dpll);
1212         } else if (IS_VALLEYVIEW(dev)) {
1213                 divisor = vlv_dpll;
1214                 count = ARRAY_SIZE(vlv_dpll);
1215         }
1216
1217         if (divisor && count) {
1218                 for (i = 0; i < count; i++) {
1219                         if (link_bw == divisor[i].link_bw) {
1220                                 pipe_config->dpll = divisor[i].dpll;
1221                                 pipe_config->clock_set = true;
1222                                 break;
1223                         }
1224                 }
1225         }
1226 }
1227
1228 static int intersect_rates(const int *source_rates, int source_len,
1229                            const int *sink_rates, int sink_len,
1230                            int *common_rates)
1231 {
1232         int i = 0, j = 0, k = 0;
1233
1234         while (i < source_len && j < sink_len) {
1235                 if (source_rates[i] == sink_rates[j]) {
1236                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1237                                 return k;
1238                         common_rates[k] = source_rates[i];
1239                         ++k;
1240                         ++i;
1241                         ++j;
1242                 } else if (source_rates[i] < sink_rates[j]) {
1243                         ++i;
1244                 } else {
1245                         ++j;
1246                 }
1247         }
1248         return k;
1249 }
1250
/*
 * Fill common_rates with the link rates supported by both this source and
 * the attached sink; returns how many were found.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_rates, *snk_rates;
	int src_len, snk_len;

	src_len = intel_dp_source_rates(dev, &src_rates);
	snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);

	return intersect_rates(src_rates, src_len,
			       snk_rates, snk_len,
			       common_rates);
}
1265
/*
 * Format up to nelem integers as a comma-separated list into str (at most
 * len bytes including the terminator). Output is silently truncated at a
 * value boundary when the buffer runs out.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int idx;

	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		int written = snprintf(str, len, "%s%d",
				       idx == 0 ? "" : ", ", array[idx]);
		/* snprintf returns the untruncated length; stop on overflow
		 * (a negative error converts to a huge size_t and also stops) */
		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1281
/*
 * Dump the source, sink and common link-rate lists to the KMS debug log.
 * Cheap early-out when KMS debugging is disabled, since formatting the
 * arrays isn't free.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1305
1306 static int rate_to_index(int find, const int *rates)
1307 {
1308         int i = 0;
1309
1310         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1311                 if (find == rates[i])
1312                         break;
1313
1314         return i;
1315 }
1316
/*
 * Return the highest link rate supported by both source and sink.
 *
 * 'rates' is zero-initialized, so rate_to_index(0, rates) finds the first
 * unused slot, i.e. the number of valid entries; intersect_rates() keeps
 * the list in ascending order, hence the last entry is the maximum.
 * Falls back to 162000 if source and sink share no rate at all.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
1329
/*
 * Translate a link rate to its index in the sink's DPCD-supplied rate
 * table. NOTE(review): presumably this index is what gets written to the
 * sink's DP_LINK_RATE_SET register during link training — confirm against
 * the training code before relying on it.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1334
1335 bool
1336 intel_dp_compute_config(struct intel_encoder *encoder,
1337                         struct intel_crtc_state *pipe_config)
1338 {
1339         struct drm_device *dev = encoder->base.dev;
1340         struct drm_i915_private *dev_priv = dev->dev_private;
1341         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1342         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1343         enum port port = dp_to_dig_port(intel_dp)->port;
1344         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1345         struct intel_connector *intel_connector = intel_dp->attached_connector;
1346         int lane_count, clock;
1347         int min_lane_count = 1;
1348         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1349         /* Conveniently, the link BW constants become indices with a shift...*/
1350         int min_clock = 0;
1351         int max_clock;
1352         int bpp, mode_rate;
1353         int link_avail, link_clock;
1354         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1355         int common_len;
1356
1357         common_len = intel_dp_common_rates(intel_dp, common_rates);
1358
1359         /* No common link rates between source and sink */
1360         WARN_ON(common_len <= 0);
1361
1362         max_clock = common_len - 1;
1363
1364         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1365                 pipe_config->has_pch_encoder = true;
1366
1367         pipe_config->has_dp_encoder = true;
1368         pipe_config->has_drrs = false;
1369         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1370
1371         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1372                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1373                                        adjusted_mode);
1374
1375                 if (INTEL_INFO(dev)->gen >= 9) {
1376                         int ret;
1377                         ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1378                         if (ret)
1379                                 return ret;
1380                 }
1381
1382                 if (!HAS_PCH_SPLIT(dev))
1383                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1384                                                  intel_connector->panel.fitting_mode);
1385                 else
1386                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1387                                                 intel_connector->panel.fitting_mode);
1388         }
1389
1390         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1391                 return false;
1392
1393         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1394                       "max bw %d pixel clock %iKHz\n",
1395                       max_lane_count, common_rates[max_clock],
1396                       adjusted_mode->crtc_clock);
1397
1398         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1399          * bpc in between. */
1400         bpp = pipe_config->pipe_bpp;
1401         if (is_edp(intel_dp)) {
1402                 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1403                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1404                                       dev_priv->vbt.edp_bpp);
1405                         bpp = dev_priv->vbt.edp_bpp;
1406                 }
1407
1408                 /*
1409                  * Use the maximum clock and number of lanes the eDP panel
1410                  * advertizes being capable of. The panels are generally
1411                  * designed to support only a single clock and lane
1412                  * configuration, and typically these values correspond to the
1413                  * native resolution of the panel.
1414                  */
1415                 min_lane_count = max_lane_count;
1416                 min_clock = max_clock;
1417         }
1418
1419         for (; bpp >= 6*3; bpp -= 2*3) {
1420                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1421                                                    bpp);
1422
1423                 for (clock = min_clock; clock <= max_clock; clock++) {
1424                         for (lane_count = min_lane_count;
1425                                 lane_count <= max_lane_count;
1426                                 lane_count <<= 1) {
1427
1428                                 link_clock = common_rates[clock];
1429                                 link_avail = intel_dp_max_data_rate(link_clock,
1430                                                                     lane_count);
1431
1432                                 if (mode_rate <= link_avail) {
1433                                         goto found;
1434                                 }
1435                         }
1436                 }
1437         }
1438
1439         return false;
1440
1441 found:
1442         if (intel_dp->color_range_auto) {
1443                 /*
1444                  * See:
1445                  * CEA-861-E - 5.1 Default Encoding Parameters
1446                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1447                  */
1448                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1449                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1450                 else
1451                         intel_dp->color_range = 0;
1452         }
1453
1454         if (intel_dp->color_range)
1455                 pipe_config->limited_color_range = true;
1456
1457         intel_dp->lane_count = lane_count;
1458
1459         if (intel_dp->num_sink_rates) {
1460                 intel_dp->link_bw = 0;
1461                 intel_dp->rate_select =
1462                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1463         } else {
1464                 intel_dp->link_bw =
1465                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1466                 intel_dp->rate_select = 0;
1467         }
1468
1469         pipe_config->pipe_bpp = bpp;
1470         pipe_config->port_clock = common_rates[clock];
1471
1472         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1473                       intel_dp->link_bw, intel_dp->lane_count,
1474                       pipe_config->port_clock, bpp);
1475         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1476                       mode_rate, link_avail);
1477
1478         intel_link_compute_m_n(bpp, lane_count,
1479                                adjusted_mode->crtc_clock,
1480                                pipe_config->port_clock,
1481                                &pipe_config->dp_m_n);
1482
1483         if (intel_connector->panel.downclock_mode != NULL &&
1484                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1485                         pipe_config->has_drrs = true;
1486                         intel_link_compute_m_n(bpp, lane_count,
1487                                 intel_connector->panel.downclock_mode->clock,
1488                                 pipe_config->port_clock,
1489                                 &pipe_config->dp_m2_n2);
1490         }
1491
1492         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1493                 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1494         else if (IS_BROXTON(dev))
1495                 /* handled in ddi */;
1496         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1497                 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1498         else
1499                 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1500
1501         return true;
1502 }
1503
/*
 * Program the CPU eDP PLL frequency select in DP_A for the current crtc
 * port clock (162 MHz link -> 160MHz PLL setting, otherwise 270MHz), and
 * mirror the choice into the cached intel_dp->DP value.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* give the PLL time to settle before it is used */
	udelay(500);
}
1534
/*
 * Build the cached DP port register value (intel_dp->DP) for the mode
 * being set, handling the per-platform register layout differences.
 * Nothing is written to the port register itself here except the CPT
 * TRANS_DP_CTL enhanced-framing bit.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU port A: sync polarity in the port register,
		 * pipe select in bits 29-30 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: enhanced framing lives in TRANS_DP_CTL, not here */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU formats */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1616
/*
 * Mask/value pairs for wait_panel_status(): each MASK selects the PP_STATUS
 * bits of interest and the matching VALUE is what they must read back as.
 * The literal 0 terms keep the four columns (PP_ON, sequence, cycle delay,
 * sequence state) visually aligned across all three pairs.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1625
/*
 * Poll PP_STATUS until the bits selected by @mask read back as @value
 * (up to 5000 ms, polling every 10 us).  On timeout an error is logged
 * but not propagated; callers continue regardless.  Caller must hold
 * pps_mutex (asserted below).
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout, 10 us poll interval */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1652
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1658
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1664
/*
 * Honour the panel's minimum power-cycle time: first wait out any remaining
 * software-tracked delay since the last power cycle, then wait for the
 * hardware power sequencer itself to go idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1676
/*
 * Wait out the software-tracked delay between panel power on and when the
 * backlight may be enabled (see _intel_edp_backlight_on()).
 */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1682
/*
 * Wait out the software-tracked delay after backlight disable before the
 * panel may be powered down.
 */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1688
/* Read the current pp_control value, unlocking the register if it
 * is locked.  The returned value always carries the unlock key so that
 * writing it back takes effect.  Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* Replace the lock field with the magic unlock value. */
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}
1706
1707 /*
1708  * Must be paired with edp_panel_vdd_off().
1709  * Must hold pps_mutex around the whole on/off sequence.
1710  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1711  */
1712 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1713 {
1714         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1715         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1716         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1717         struct drm_i915_private *dev_priv = dev->dev_private;
1718         enum intel_display_power_domain power_domain;
1719         u32 pp;
1720         u32 pp_stat_reg, pp_ctrl_reg;
1721         bool need_to_disable = !intel_dp->want_panel_vdd;
1722
1723         lockdep_assert_held(&dev_priv->pps_mutex);
1724
1725         if (!is_edp(intel_dp))
1726                 return false;
1727
1728         cancel_delayed_work(&intel_dp->panel_vdd_work);
1729         intel_dp->want_panel_vdd = true;
1730
1731         if (edp_have_panel_vdd(intel_dp))
1732                 return need_to_disable;
1733
1734         power_domain = intel_display_port_power_domain(intel_encoder);
1735         intel_display_power_get(dev_priv, power_domain);
1736
1737         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1738                       port_name(intel_dig_port->port));
1739
1740         if (!edp_have_panel_power(intel_dp))
1741                 wait_panel_power_cycle(intel_dp);
1742
1743         pp = ironlake_get_pp_control(intel_dp);
1744         pp |= EDP_FORCE_VDD;
1745
1746         pp_stat_reg = _pp_stat_reg(intel_dp);
1747         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1748
1749         I915_WRITE(pp_ctrl_reg, pp);
1750         POSTING_READ(pp_ctrl_reg);
1751         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1752                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1753         /*
1754          * If the panel wasn't on, delay before accessing aux channel
1755          */
1756         if (!edp_have_panel_power(intel_dp)) {
1757                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1758                               port_name(intel_dig_port->port));
1759                 msleep(intel_dp->panel_power_up_delay);
1760         }
1761
1762         return need_to_disable;
1763 }
1764
1765 /*
1766  * Must be paired with intel_edp_panel_vdd_off() or
1767  * intel_edp_panel_off().
1768  * Nested calls to these functions are not allowed since
1769  * we drop the lock. Caller must use some higher level
1770  * locking to prevent nested calls from other threads.
1771  */
1772 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1773 {
1774         bool vdd;
1775
1776         if (!is_edp(intel_dp))
1777                 return;
1778
1779         pps_lock(intel_dp);
1780         vdd = edp_panel_vdd_on(intel_dp);
1781         pps_unlock(intel_dp);
1782
1783         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1784              port_name(dp_to_dig_port(intel_dp)->port));
1785 }
1786
1787 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1788 {
1789         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1790         struct drm_i915_private *dev_priv = dev->dev_private;
1791         struct intel_digital_port *intel_dig_port =
1792                 dp_to_dig_port(intel_dp);
1793         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1794         enum intel_display_power_domain power_domain;
1795         u32 pp;
1796         u32 pp_stat_reg, pp_ctrl_reg;
1797
1798         lockdep_assert_held(&dev_priv->pps_mutex);
1799
1800         WARN_ON(intel_dp->want_panel_vdd);
1801
1802         if (!edp_have_panel_vdd(intel_dp))
1803                 return;
1804
1805         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1806                       port_name(intel_dig_port->port));
1807
1808         pp = ironlake_get_pp_control(intel_dp);
1809         pp &= ~EDP_FORCE_VDD;
1810
1811         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1812         pp_stat_reg = _pp_stat_reg(intel_dp);
1813
1814         I915_WRITE(pp_ctrl_reg, pp);
1815         POSTING_READ(pp_ctrl_reg);
1816
1817         /* Make sure sequencer is idle before allowing subsequent activity */
1818         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1819         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1820
1821         if ((pp & POWER_TARGET_ON) == 0)
1822                 intel_dp->last_power_cycle = jiffies;
1823
1824         power_domain = intel_display_port_power_domain(intel_encoder);
1825         intel_display_power_put(dev_priv, power_domain);
1826 }
1827
/*
 * Delayed-work handler scheduled by edp_panel_vdd_schedule_off(): drops
 * VDD unless someone has expressed renewed interest in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	/* Re-check under the lock; a racing vdd_on cancels our intent. */
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1838
/* Schedule a deferred VDD off via edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1851
1852 /*
1853  * Must be paired with edp_panel_vdd_on().
1854  * Must hold pps_mutex around the whole on/off sequence.
1855  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1856  */
1857 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1858 {
1859         struct drm_i915_private *dev_priv =
1860                 intel_dp_to_dev(intel_dp)->dev_private;
1861
1862         lockdep_assert_held(&dev_priv->pps_mutex);
1863
1864         if (!is_edp(intel_dp))
1865                 return;
1866
1867         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1868              port_name(dp_to_dig_port(intel_dp)->port));
1869
1870         intel_dp->want_panel_vdd = false;
1871
1872         if (sync)
1873                 edp_panel_vdd_off_sync(intel_dp);
1874         else
1875                 edp_panel_vdd_schedule_off(intel_dp);
1876 }
1877
/*
 * Turn the eDP panel power on via the power sequencer and wait until it
 * reports the on/idle state.  No-op (with a WARN) if power is already on.
 * Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the panel's minimum off time before powering back up. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay in wait_backlight_on(). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1925
/* Locked wrapper around edp_panel_on() for external callers. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1935
1936
/*
 * Turn the eDP panel power off.  Requires the caller to have VDD forced
 * on (warned below); clears VDD in the same register write and drops the
 * corresponding power-domain reference.  Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the power-cycle delay in wait_panel_power_cycle(). */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1978
/* Locked wrapper around edp_panel_off() for external callers. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
1988
/* Enable backlight in the panel power control.  Waits out the
 * panel-power-on to backlight-on delay first, takes pps_mutex itself.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2018
/* Enable backlight PWM and backlight PP control.  PWM first, then the
 * panel power control's backlight-enable bit.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2030
/* Disable backlight in the panel power control.  Takes pps_mutex itself
 * and records the off timestamp for edp_wait_backlight_off().
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	intel_dp->last_backlight_off = jiffies;
	/* Honour the backlight-off to panel-off delay before returning. */
	edp_wait_backlight_off(intel_dp);
}
2057
/* Disable backlight PP control and backlight PWM.  Reverse order of
 * intel_edp_backlight_on(): PP control first, then PWM.
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2069
2070 /*
2071  * Hook for controlling the panel power control backlight through the bl_power
2072  * sysfs attribute. Take care to handle multiple calls.
2073  */
2074 static void intel_edp_backlight_power(struct intel_connector *connector,
2075                                       bool enable)
2076 {
2077         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2078         bool is_enabled;
2079
2080         pps_lock(intel_dp);
2081         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2082         pps_unlock(intel_dp);
2083
2084         if (is_enabled == enable)
2085                 return;
2086
2087         DRM_DEBUG_KMS("panel power control backlight %s\n",
2088                       enable ? "enable" : "disable");
2089
2090         if (enable)
2091                 _intel_edp_backlight_on(intel_dp);
2092         else
2093                 _intel_edp_backlight_off(intel_dp);
2094 }
2095
/*
 * Enable the eDP PLL in DP_A.  The pipe must be disabled and both the
 * PLL and the port must currently be off (asserted/warned below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Allow the PLL time to settle before it is used. */
	udelay(200);
}
2121
/*
 * Disable the eDP PLL in DP_A.  The pipe must be disabled, the PLL must
 * currently be on, and the port off (asserted/warned below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2146
2147 /* If the sink supports it, try to set the power state appropriately */
2148 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2149 {
2150         int ret, i;
2151
2152         /* Should have a valid DPCD by this point */
2153         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2154                 return;
2155
2156         if (mode != DRM_MODE_DPMS_ON) {
2157                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2158                                          DP_SET_POWER_D3);
2159         } else {
2160                 /*
2161                  * When turning on, we need to retry for 1ms to give the sink
2162                  * time to wake up.
2163                  */
2164                 for (i = 0; i < 3; i++) {
2165                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2166                                                  DP_SET_POWER_D0);
2167                         if (ret == 1)
2168                                 break;
2169                         msleep(1);
2170                 }
2171         }
2172
2173         if (ret != 1)
2174                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2175                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2176 }
2177
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it.  Returns false when the port's power domain is
 * off or the port-enable bit is clear.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* Pipe selection is encoded differently per platform/port. */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2220
/*
 * Read back the pipe configuration (sync polarity, audio, color range,
 * link m/n, port clock, dotclock) for an enabled DP encoder into
 * @pipe_config.  Used for hardware state readout and cross-checking.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT (except port A) sync polarity lives in TRANS_DP_CTL,
	 * elsewhere in the port register itself. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link rate is encoded in the DP_A PLL frequency field. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2304
/*
 * Encoder disable hook: tear down audio/PSR, then power the panel down
 * (backlight, sink DPMS, panel power) in order, under a forced VDD.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2328
/* ILK+ post-disable: drop the link, and for port A also the eDP PLL. */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2338
/* VLV post-disable: just take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2345
/*
 * CHV post-disable: take the link down, then reset the PHY data lanes
 * via sideband (DPIO) writes.  All DPIO accesses are serialized by
 * sb_lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert lane reset on both lane pairs of the channel. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2381
/*
 * Program the requested link training pattern for the port.
 *
 * @intel_dp: port to program
 * @DP: in/out copy of the port's DP register value; only modified on the
 *      non-DDI paths — the caller is responsible for writing it to the
 *      output register afterwards
 * @dp_train_pat: DP_TRAINING_PATTERN_* selector, optionally OR'ed with
 *      DP_LINK_SCRAMBLING_DISABLE
 *
 * On DDI platforms the training pattern (and scrambling control) lives in
 * DP_TP_CTL, which is read-modify-written here directly.  On gen7 port A
 * and CPT PCH ports the CPT variants of the link-train bits are set in
 * *DP.  Otherwise the legacy bits are used; CHV has a wider mask and is
 * the only non-DDI platform here with TPS3 support.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        /* "disable" means normal pixel transmission */
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* no TPS3 on this hardware; fall back to pattern 2 */
                        DRM_ERROR("DP training pattern 3 not supported\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                if (IS_CHERRYVIEW(dev))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                /* no TPS3 on this hardware; use pattern 2 */
                                DRM_ERROR("DP training pattern 3 not supported\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2465
/*
 * Enable the DP port itself: program training pattern 1 into the DP
 * register, write it once with the port still disabled, then write it
 * again with DP_PORT_EN set.  The two-step write order is required on
 * VLV/CHV (see comment below) and is harmless elsewhere.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* enable with pattern 1 (as per spec) */
        _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                 DP_TRAINING_PATTERN_1);

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2489
/*
 * Common DP enable sequence: bind a power sequencer (VLV), enable the
 * port, run the eDP panel power-on handshake, perform link training,
 * and finally enable audio if the mode carries it.  The panel power
 * steps are serialized under pps_lock.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        /*
         * NOTE(review): lane_mask is passed as 0 to vlv_wait_port_ready();
         * the semantics of an empty mask are not visible here — confirm
         * against vlv_wait_port_ready().
         */
        unsigned int lane_mask = 0x0;

        /* port must still be disabled at this point */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);

        intel_dp_enable_port(intel_dp);

        /* eDP panel power-on with vdd held just long enough */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev))
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (crtc->config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_audio_codec_enable(encoder);
        }
}
2530
2531 static void g4x_enable_dp(struct intel_encoder *encoder)
2532 {
2533         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2534
2535         intel_enable_dp(encoder);
2536         intel_edp_backlight_on(intel_dp);
2537 }
2538
2539 static void vlv_enable_dp(struct intel_encoder *encoder)
2540 {
2541         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2542
2543         intel_edp_backlight_on(intel_dp);
2544         intel_psr_enable(intel_dp);
2545 }
2546
/*
 * g4x/ilk pre-enable hook: program the port registers, and for the CPU
 * eDP port (port A, which only exists on ilk+) select and turn on the
 * eDP PLL before the port gets enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

        intel_dp_prepare(encoder);

        /* Only ilk+ has port A */
        if (dport->port == PORT_A) {
                ironlake_set_pll_cpu_edp(intel_dp);
                ironlake_edp_pll_on(intel_dp);
        }
}
2560
/*
 * Logically disconnect the power sequencer currently bound to this eDP
 * port: sync off any pending vdd, clear the port-select field in the
 * pipe's PP_ON_DELAYS register, and mark the port as having no PPS.
 * Caller holds pps_mutex (via the callers of the steal/init paths).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing.
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
2586
/*
 * Take the given pipe's power sequencer away from whichever eDP port
 * currently owns it, so the caller can bind it to a new port.  Walks
 * all eDP encoders and detaches any that claim @pipe.  Must be called
 * with pps_mutex held; only pipes A and B have power sequencers on VLV.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *intel_dp;
                enum port port;

                /* only eDP ports ever own a power sequencer */
                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->port;

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* stealing from an active port indicates a state-tracking bug */
                WARN(encoder->connectors_active,
                     "stealing pipe %c power sequencer from active eDP port %c\n",
                     pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
2623
/*
 * Bind the power sequencer of the port's current pipe to this eDP port
 * and initialize it.  No-op for non-eDP ports or when the right PPS is
 * already bound.  Detaches any previously-used PPS on this port and
 * steals the target pipe's PPS from other ports first.  Caller must
 * hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        /* already using this pipe's power sequencer */
        if (intel_dp->pps_pipe == crtc->pipe)
                return;

        /*
         * If another power sequencer was being used on this
         * port previously make sure to turn off vdd there while
         * we still have control of it.
         */
        if (intel_dp->pps_pipe != INVALID_PIPE)
                vlv_detach_power_sequencer(intel_dp);

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev, crtc->pipe);

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2664
/*
 * VLV pre-enable hook: program the DPIO PCS registers for the port's
 * channel under sb_lock, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * NOTE(review): the value read here is immediately discarded by
         * the "val = 0" below — presumably a deliberate dummy DPIO
         * access, but confirm against the VLV DPIO documentation before
         * removing it.
         */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2693
/*
 * VLV pre-PLL-enable hook: program the port registers, then put the
 * DPIO TX lanes into their default reset state and apply the inter-pair
 * skew workaround, all under sb_lock.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        intel_dp_prepare(encoder);

        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->sb_lock);
}
2723
/*
 * CHV pre-enable hook: condition the DPIO PHY for the port's channel —
 * hand TX FIFO reset control to hardware, deassert the soft lane
 * resets, program per-lane latency (upar) settings, and set up data
 * lane staggering based on link clock — then run the common DP enable
 * sequence.  All DPIO accesses are done under sb_lock.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        int data, i, stagger;
        u32 val;

        mutex_lock(&dev_priv->sb_lock);

        /* allow hardware to manage TX FIFO reset source */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        /* Deassert soft data lane reset*/
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
        val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

        /* Program Tx lane latency optimal setting*/
        for (i = 0; i < 4; i++) {
                /* Set the upar bit */
                data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }

        /* Data lane stagger programming */
        /* stagger value scales inversely with the port (link) clock */
        if (intel_crtc->config->port_clock > 270000)
                stagger = 0x18;
        else if (intel_crtc->config->port_clock > 135000)
                stagger = 0xd;
        else if (intel_crtc->config->port_clock > 67500)
                stagger = 0x7;
        else if (intel_crtc->config->port_clock > 33750)
                stagger = 0x4;
        else
                stagger = 0x2;

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));

        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
                       DPIO_LANESTAGGER_STRAP_OVRD |
                       DPIO_TX1_STAGGER_MASK(0x1f) |
                       DPIO_TX1_STAGGER_MULT(7) |
                       DPIO_TX2_STAGGER_MULT(5));

        mutex_unlock(&dev_priv->sb_lock);

        intel_enable_dp(encoder);
}
2811
/*
 * CHV pre-PLL-enable hook: program the port registers, then configure
 * the PHY's left/right clock buffer distribution and clock-channel
 * usage for the pipe/channel combination, under sb_lock.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
        u32 val;

        intel_dp_prepare(encoder);

        mutex_lock(&dev_priv->sb_lock);

        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
                val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA1_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA1_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
        } else {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
                val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
                if (ch == DPIO_CH0)
                        val |= CHV_BUFLEFTENA2_FORCE;
                if (ch == DPIO_CH1)
                        val |= CHV_BUFRIGHTENA2_FORCE;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
        }

        /* program clock channel usage */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
        val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
        if (pipe != PIPE_B)
                val &= ~CHV_PCS_USEDCLKCHANNEL;
        else
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

        /*
         * This is a bit weird since generally CL
         * matches the pipe, but here we need to
         * pick the CL based on the port.
         */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
        if (pipe != PIPE_B)
                val &= ~CHV_CMN_USEDCLKCHANNEL;
        else
                val |= CHV_CMN_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

        mutex_unlock(&dev_priv->sb_lock);
}
2877
2878 /*
2879  * Native read with retry for link status and receiver capability reads for
2880  * cases where the sink may still be asleep.
2881  *
2882  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2883  * supposed to retry 3 times per the spec.
2884  */
2885 static ssize_t
2886 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2887                         void *buffer, size_t size)
2888 {
2889         ssize_t ret;
2890         int i;
2891
2892         /*
2893          * Sometime we just get the same incorrect byte repeated
2894          * over the entire buffer. Doing just one throw away read
2895          * initially seems to "solve" it.
2896          */
2897         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2898
2899         for (i = 0; i < 3; i++) {
2900                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2901                 if (ret == size)
2902                         return ret;
2903                 msleep(1);
2904         }
2905
2906         return ret;
2907 }
2908
2909 /*
2910  * Fetch AUX CH registers 0x202 - 0x207 which contain
2911  * link status information
2912  */
2913 static bool
2914 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2915 {
2916         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2917                                        DP_LANE0_1_STATUS,
2918                                        link_status,
2919                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2920 }
2921
2922 /* These are source-specific values. */
2923 static uint8_t
2924 intel_dp_voltage_max(struct intel_dp *intel_dp)
2925 {
2926         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2927         struct drm_i915_private *dev_priv = dev->dev_private;
2928         enum port port = dp_to_dig_port(intel_dp)->port;
2929
2930         if (IS_BROXTON(dev))
2931                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2932         else if (INTEL_INFO(dev)->gen >= 9) {
2933                 if (dev_priv->edp_low_vswing && port == PORT_A)
2934                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2936         } else if (IS_VALLEYVIEW(dev))
2937                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2938         else if (IS_GEN7(dev) && port == PORT_A)
2939                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2940         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2941                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2942         else
2943                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2944 }
2945
/*
 * Maximum pre-emphasis level this source supports for the given voltage
 * swing during link training.  Higher swing levels generally permit
 * less pre-emphasis; the exact table is platform-specific.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum port port = dp_to_dig_port(intel_dp)->port;

        if (INTEL_INFO(dev)->gen >= 9) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_3;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else if (IS_GEN7(dev) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                /* fallthrough */
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        return DP_TRAIN_PRE_EMPH_LEVEL_1;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
        }
}
3013
/*
 * Translate the requested training levels (from train_set[0]) into VLV
 * DPIO de-emphasis / pre-emphasis / unique-transition-scale register
 * values and write them to the PHY under sb_lock.
 *
 * Returns 0 in all cases: on VLV the levels are programmed entirely
 * through DPIO here, and unsupported swing/pre-emphasis combinations
 * simply skip the DPIO writes.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;

        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_1:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_2:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPH_LEVEL_3:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        /* TX_DW5 is written 0 first and 0x80000000 last to latch the values */
        mutex_lock(&dev_priv->sb_lock);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->sb_lock);

        return 0;
}
3113
/*
 * chv_signal_levels - program CHV DPIO PHY swing/de-emphasis via sideband
 *
 * Translates the requested voltage-swing/pre-emphasis combination from
 * intel_dp->train_set[0] into the CHV PHY de-emphasis and margin register
 * values and writes them through the sideband (DPIO) interface.
 *
 * Always returns 0: on CHV the levels are applied directly to the PHY here,
 * so the caller merges nothing into the DP port register (it uses mask == 0).
 * Unsupported swing/pre-emphasis combinations also return 0, without
 * touching the PHY.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up the PHY de-emphasis/margin pair for the requested levels. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* All DPIO accesses below must stay under the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Only the max-swing/no-preemph combination re-enables the scale. */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3287
3288 static void
3289 intel_get_adjust_train(struct intel_dp *intel_dp,
3290                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3291 {
3292         uint8_t v = 0;
3293         uint8_t p = 0;
3294         int lane;
3295         uint8_t voltage_max;
3296         uint8_t preemph_max;
3297
3298         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3299                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3300                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3301
3302                 if (this_v > v)
3303                         v = this_v;
3304                 if (this_p > p)
3305                         p = this_p;
3306         }
3307
3308         voltage_max = intel_dp_voltage_max(intel_dp);
3309         if (v >= voltage_max)
3310                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3311
3312         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3313         if (p >= preemph_max)
3314                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3315
3316         for (lane = 0; lane < 4; lane++)
3317                 intel_dp->train_set[lane] = v | p;
3318 }
3319
3320 static uint32_t
3321 gen4_signal_levels(uint8_t train_set)
3322 {
3323         uint32_t        signal_levels = 0;
3324
3325         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3326         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3327         default:
3328                 signal_levels |= DP_VOLTAGE_0_4;
3329                 break;
3330         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3331                 signal_levels |= DP_VOLTAGE_0_6;
3332                 break;
3333         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3334                 signal_levels |= DP_VOLTAGE_0_8;
3335                 break;
3336         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3337                 signal_levels |= DP_VOLTAGE_1_2;
3338                 break;
3339         }
3340         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3341         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3342         default:
3343                 signal_levels |= DP_PRE_EMPHASIS_0;
3344                 break;
3345         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3346                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3347                 break;
3348         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3349                 signal_levels |= DP_PRE_EMPHASIS_6;
3350                 break;
3351         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3352                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3353                 break;
3354         }
3355         return signal_levels;
3356 }
3357
3358 /* Gen6's DP voltage swing and pre-emphasis control */
3359 static uint32_t
3360 gen6_edp_signal_levels(uint8_t train_set)
3361 {
3362         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3363                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3364         switch (signal_levels) {
3365         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3366         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3367                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3368         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3369                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3370         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3371         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3372                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3373         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3374         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3375                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3376         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3377         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3378                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3379         default:
3380                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3381                               "0x%x\n", signal_levels);
3382                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3383         }
3384 }
3385
3386 /* Gen7's DP voltage swing and pre-emphasis control */
3387 static uint32_t
3388 gen7_edp_signal_levels(uint8_t train_set)
3389 {
3390         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3391                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3392         switch (signal_levels) {
3393         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3394                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3395         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3396                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3397         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3398                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3399
3400         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3401                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3402         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3403                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3404
3405         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3406                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3407         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3408                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3409
3410         default:
3411                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3412                               "0x%x\n", signal_levels);
3413                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3414         }
3415 }
3416
3417 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3418 static uint32_t
3419 hsw_signal_levels(uint8_t train_set)
3420 {
3421         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3422                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3423         switch (signal_levels) {
3424         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3425                 return DDI_BUF_TRANS_SELECT(0);
3426         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3427                 return DDI_BUF_TRANS_SELECT(1);
3428         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3429                 return DDI_BUF_TRANS_SELECT(2);
3430         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3431                 return DDI_BUF_TRANS_SELECT(3);
3432
3433         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434                 return DDI_BUF_TRANS_SELECT(4);
3435         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3436                 return DDI_BUF_TRANS_SELECT(5);
3437         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3438                 return DDI_BUF_TRANS_SELECT(6);
3439
3440         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3441                 return DDI_BUF_TRANS_SELECT(7);
3442         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3443                 return DDI_BUF_TRANS_SELECT(8);
3444
3445         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3446                 return DDI_BUF_TRANS_SELECT(9);
3447         default:
3448                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3449                               "0x%x\n", signal_levels);
3450                 return DDI_BUF_TRANS_SELECT(0);
3451         }
3452 }
3453
3454 static void bxt_signal_levels(struct intel_dp *intel_dp)
3455 {
3456         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3457         enum port port = dport->port;
3458         struct drm_device *dev = dport->base.base.dev;
3459         struct intel_encoder *encoder = &dport->base;
3460         uint8_t train_set = intel_dp->train_set[0];
3461         uint32_t level = 0;
3462
3463         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3464                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3465         switch (signal_levels) {
3466         default:
3467                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3468         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3469                 level = 0;
3470                 break;
3471         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3472                 level = 1;
3473                 break;
3474         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3475                 level = 2;
3476                 break;
3477         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3478                 level = 3;
3479                 break;
3480         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3481                 level = 4;
3482                 break;
3483         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3484                 level = 5;
3485                 break;
3486         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3487                 level = 6;
3488                 break;
3489         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3490                 level = 7;
3491                 break;
3492         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3493                 level = 8;
3494                 break;
3495         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3496                 level = 9;
3497                 break;
3498         }
3499
3500         bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3501 }
3502
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	/*
	 * Pick the platform-specific level encoding. Platforms whose levels
	 * are programmed out-of-band (BXT vswing sequence, CHV/VLV DPIO)
	 * return signal_levels == 0 and mask == 0, so nothing is merged into
	 * the port register below. IS_BROXTON is deliberately tested before
	 * HAS_DDI so BXT takes its dedicated path.
	 */
	if (IS_BROXTON(dev)) {
		signal_levels = 0;
		bxt_signal_levels(intel_dp);
		mask = 0;
	} else if (HAS_DDI(dev)) {
		signal_levels = hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log register-encoded levels; mask == 0 means out-of-band. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Merge the new levels into the caller's port register value. */
	*DP = (*DP & ~mask) | signal_levels;
}
3548
/*
 * Program the requested training pattern into the source's port register
 * and mirror it, plus the per-lane drive settings, to the sink's DPCD.
 *
 * Returns true if the DPCD write transferred the full expected length.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* buf[0] = DP_TRAINING_PATTERN_SET, buf[1..] = per-lane settings */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3581
/*
 * (Re)start link training: zero the per-lane drive settings, unless a
 * previously successful set is being reused (train_set_valid), then
 * program the signal levels and the requested training pattern.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3591
/*
 * Apply the sink's latest per-lane adjustment requests (from link_status):
 * recompute the drive settings, program them on the source, and write
 * DP_TRAINING_LANEx_SET to the sink.
 *
 * Returns true if all lane_count DPCD bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3612
/*
 * Switch a DDI port to transmitting idle patterns after training and wait
 * for the hardware to report completion. No-op on non-DDI platforms.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3643
/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* Sinks advertising a rate table get the selected index as well. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff is a sentinel no real swing value can match on iteration 1. */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zeroed settings. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Remember the final port register value for the caller. */
	intel_dp->DP = DP;
}
3754
/*
 * Channel-equalization phase of link training. Runs after clock recovery
 * (intel_dp_start_link_train); on success marks the trained settings as
 * reusable via train_set_valid.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Give up after too many clock-recovery restarts. */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		/* Trained values can be reused on the next retrain attempt. */
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3837
/*
 * End link training: switch source and sink back to normal operation by
 * programming DP_TRAINING_PATTERN_DISABLE.
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3843
/*
 * Shut down the DP link on pre-DDI platforms: step the port through the
 * idle training pattern, then clear the port-enable bit.  DDI platforms
 * never get here (the DDI code owns their disable sequence) — hence the
 * WARN_ON below.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/*
	 * First switch the port to the idle link-training pattern; the
	 * mask/pattern register layout differs between CPT/gen7 PORT_A,
	 * CHV and everything else.
	 */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then actually turn the port (and audio) off. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	/* Honor the panel's power-down delay before anything re-enables it. */
	msleep(intel_dp->panel_power_down_delay);
}
3899
/*
 * Read the sink's DPCD receiver-capability block and derive per-sink
 * state from it: PSR/PSR2 support, TPS3 training-pattern support,
 * eDP 1.4 intermediate link rates, and downstream-port info.
 *
 * Returns false if the AUX transfer failed, the sink reports no DPCD,
 * or the downstream-port status could not be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sources. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated if shorter than max. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3994
3995 static void
3996 intel_dp_probe_oui(struct intel_dp *intel_dp)
3997 {
3998         u8 buf[3];
3999
4000         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4001                 return;
4002
4003         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4004                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4005                               buf[0], buf[1], buf[2]);
4006
4007         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4008                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4009                               buf[0], buf[1], buf[2]);
4010 }
4011
4012 static bool
4013 intel_dp_probe_mst(struct intel_dp *intel_dp)
4014 {
4015         u8 buf[1];
4016
4017         if (!intel_dp->can_mst)
4018                 return false;
4019
4020         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4021                 return false;
4022
4023         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4024                 if (buf[0] & DP_MST_CAP) {
4025                         DRM_DEBUG_KMS("Sink is MST capable\n");
4026                         intel_dp->is_mst = true;
4027                 } else {
4028                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4029                         intel_dp->is_mst = false;
4030                 }
4031         }
4032
4033         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4034         return intel_dp->is_mst;
4035 }
4036
/*
 * Ask the sink to compute a CRC of the frames it receives (DPCD
 * TEST_SINK machinery) and read the six CRC bytes back into @crc.
 * Returns 0 on success or a negative errno (-ENOTTY if the sink has no
 * CRC support, -ETIMEDOUT if the CRC never updates, -EIO on AUX errors).
 * IPS is disabled for the duration — NOTE(review): presumably so IPS
 * can't perturb the frames being CRC'd; confirm.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;
	int ret = 0;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Bail out early if the sink doesn't implement CRC testing. */
	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
		ret = -ENOTTY;
		goto out;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Start CRC calculation in the sink (read-modify-write TEST_SINK). */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
				buf | DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Remember the current CRC count so we can tell when it advances. */
	test_crc_count = buf & DP_TEST_COUNT_MASK;

	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Fetch the six CRC bytes (R/Cr, G/Y, B/Cb — two bytes each). */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto out;
	}

	/* Stop the CRC calculation again (clear DP_TEST_SINK_START). */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		ret = -EIO;
		goto out;
	}
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		ret = -EIO;
		goto out;
	}
out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4111
4112 static bool
4113 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4114 {
4115         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4116                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4117                                        sink_irq_vector, 1) == 1;
4118 }
4119
4120 static bool
4121 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4122 {
4123         int ret;
4124
4125         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4126                                              DP_SINK_COUNT_ESI,
4127                                              sink_irq_vector, 14);
4128         if (ret != 14)
4129                 return false;
4130
4131         return true;
4132 }
4133
4134 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4135 {
4136         uint8_t test_result = DP_TEST_ACK;
4137         return test_result;
4138 }
4139
4140 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4141 {
4142         uint8_t test_result = DP_TEST_NAK;
4143         return test_result;
4144 }
4145
4146 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4147 {
4148         uint8_t test_result = DP_TEST_NAK;
4149         struct intel_connector *intel_connector = intel_dp->attached_connector;
4150         struct drm_connector *connector = &intel_connector->base;
4151
4152         if (intel_connector->detect_edid == NULL ||
4153             connector->edid_corrupt ||
4154             intel_dp->aux.i2c_defer_count > 6) {
4155                 /* Check EDID read for NACKs, DEFERs and corruption
4156                  * (DP CTS 1.2 Core r1.1)
4157                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4158                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4159                  *    4.2.2.6 : EDID corruption detected
4160                  * Use failsafe mode for all cases
4161                  */
4162                 if (intel_dp->aux.i2c_nack_count > 0 ||
4163                         intel_dp->aux.i2c_defer_count > 0)
4164                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4165                                       intel_dp->aux.i2c_nack_count,
4166                                       intel_dp->aux.i2c_defer_count);
4167                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4168         } else {
4169                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4170                                         DP_TEST_EDID_CHECKSUM,
4171                                         &intel_connector->detect_edid->checksum,
4172                                         1))
4173                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4174
4175                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4176                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4177         }
4178
4179         /* Set test active flag here so userspace doesn't interrupt things */
4180         intel_dp->compliance_test_active = 1;
4181
4182         return test_result;
4183 }
4184
4185 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4186 {
4187         uint8_t test_result = DP_TEST_NAK;
4188         return test_result;
4189 }
4190
/*
 * Service a DP automated-test request: reset the compliance bookkeeping,
 * read DPCD TEST_REQUEST, dispatch to the per-test handler and write the
 * resulting ACK/NAK back to TEST_RESPONSE.  Unknown or unreadable
 * requests are NAKed.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	/* Reset compliance state from any previous test. */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Clear the AUX NACK/DEFER counters (consulted by the EDID test). */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always report a response, even when the request read failed. */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4243
/*
 * Handle a short HPD pulse while in MST mode: read the sink's ESI
 * (event status indicator) block, retrain the link if channel EQ has
 * dropped, and pass the event to the MST topology manager.  Loops as
 * long as the sink keeps reporting events.  If the ESI read fails, MST
 * mode is torn down and a hotplug event is generated.  Returns the
 * topology manager's result, or -EINVAL when not in MST mode / on ESI
 * read failure.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write a few times in case of AUX flakiness. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read ESI: more events may have arrived. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4300
4301 /*
4302  * According to DP spec
4303  * 5.1.2:
4304  *  1. Read DPCD
4305  *  2. Configure link according to Receiver Capabilities
4306  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4307  *  4. Check link status on receipt of hot-plug interrupt
4308  */
4309 static void
4310 intel_dp_check_link_status(struct intel_dp *intel_dp)
4311 {
4312         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4313         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4314         u8 sink_irq_vector;
4315         u8 link_status[DP_LINK_STATUS_SIZE];
4316
4317         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4318
4319         if (!intel_encoder->connectors_active)
4320                 return;
4321
4322         if (WARN_ON(!intel_encoder->base.crtc))
4323                 return;
4324
4325         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4326                 return;
4327
4328         /* Try to read receiver status if the link appears to be up */
4329         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4330                 return;
4331         }
4332
4333         /* Now read the DPCD to see if it's actually running */
4334         if (!intel_dp_get_dpcd(intel_dp)) {
4335                 return;
4336         }
4337
4338         /* Try to read the source of the interrupt */
4339         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4340             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4341                 /* Clear interrupt source */
4342                 drm_dp_dpcd_writeb(&intel_dp->aux,
4343                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4344                                    sink_irq_vector);
4345
4346                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4347                         DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4348                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4349                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4350         }
4351
4352         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4353                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4354                               intel_encoder->base.name);
4355                 intel_dp_start_link_train(intel_dp);
4356                 intel_dp_complete_link_train(intel_dp);
4357                 intel_dp_stop_link_train(intel_dp);
4358         }
4359 }
4360
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide the connector status from the DPCD: a native sink is connected
 * if the DPCD reads back; behind a branch device we consult SINK_COUNT
 * (when HPD-aware), then fall back to probing DDC, then to per-port-type
 * heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* Non-zero sink count means something is attached downstream. */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD: only the coarse downstream-port type exists. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4410
4411 static enum drm_connector_status
4412 edp_detect(struct intel_dp *intel_dp)
4413 {
4414         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4415         enum drm_connector_status status;
4416
4417         status = intel_panel_detect(dev);
4418         if (status == connector_status_unknown)
4419                 status = connector_status_connected;
4420
4421         return status;
4422 }
4423
4424 static enum drm_connector_status
4425 ironlake_dp_detect(struct intel_dp *intel_dp)
4426 {
4427         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4428         struct drm_i915_private *dev_priv = dev->dev_private;
4429         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4430
4431         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4432                 return connector_status_disconnected;
4433
4434         return intel_dp_detect_dpcd(intel_dp);
4435 }
4436
4437 static int g4x_digital_port_connected(struct drm_device *dev,
4438                                        struct intel_digital_port *intel_dig_port)
4439 {
4440         struct drm_i915_private *dev_priv = dev->dev_private;
4441         uint32_t bit;
4442
4443         if (IS_VALLEYVIEW(dev)) {
4444                 switch (intel_dig_port->port) {
4445                 case PORT_B:
4446                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4447                         break;
4448                 case PORT_C:
4449                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4450                         break;
4451                 case PORT_D:
4452                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4453                         break;
4454                 default:
4455                         return -EINVAL;
4456                 }
4457         } else {
4458                 switch (intel_dig_port->port) {
4459                 case PORT_B:
4460                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4461                         break;
4462                 case PORT_C:
4463                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4464                         break;
4465                 case PORT_D:
4466                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4467                         break;
4468                 default:
4469                         return -EINVAL;
4470                 }
4471         }
4472
4473         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4474                 return 0;
4475         return 1;
4476 }
4477
4478 static enum drm_connector_status
4479 g4x_dp_detect(struct intel_dp *intel_dp)
4480 {
4481         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4482         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4483         int ret;
4484
4485         /* Can't disconnect eDP, but you can close the lid... */
4486         if (is_edp(intel_dp)) {
4487                 enum drm_connector_status status;
4488
4489                 status = intel_panel_detect(dev);
4490                 if (status == connector_status_unknown)
4491                         status = connector_status_connected;
4492                 return status;
4493         }
4494
4495         ret = g4x_digital_port_connected(dev, intel_dig_port);
4496         if (ret == -EINVAL)
4497                 return connector_status_unknown;
4498         else if (ret == 0)
4499                 return connector_status_disconnected;
4500
4501         return intel_dp_detect_dpcd(intel_dp);
4502 }
4503
4504 static struct edid *
4505 intel_dp_get_edid(struct intel_dp *intel_dp)
4506 {
4507         struct intel_connector *intel_connector = intel_dp->attached_connector;
4508
4509         /* use cached edid if we have one */
4510         if (intel_connector->edid) {
4511                 /* invalid edid */
4512                 if (IS_ERR(intel_connector->edid))
4513                         return NULL;
4514
4515                 return drm_edid_duplicate(intel_connector->edid);
4516         } else
4517                 return drm_get_edid(&intel_connector->base,
4518                                     &intel_dp->aux.ddc);
4519 }
4520
4521 static void
4522 intel_dp_set_edid(struct intel_dp *intel_dp)
4523 {
4524         struct intel_connector *intel_connector = intel_dp->attached_connector;
4525         struct edid *edid;
4526
4527         edid = intel_dp_get_edid(intel_dp);
4528         intel_connector->detect_edid = edid;
4529
4530         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4531                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4532         else
4533                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4534 }
4535
4536 static void
4537 intel_dp_unset_edid(struct intel_dp *intel_dp)
4538 {
4539         struct intel_connector *intel_connector = intel_dp->attached_connector;
4540
4541         kfree(intel_connector->detect_edid);
4542         intel_connector->detect_edid = NULL;
4543
4544         intel_dp->has_audio = false;
4545 }
4546
4547 static enum intel_display_power_domain
4548 intel_dp_power_get(struct intel_dp *dp)
4549 {
4550         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4551         enum intel_display_power_domain power_domain;
4552
4553         power_domain = intel_display_port_power_domain(encoder);
4554         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4555
4556         return power_domain;
4557 }
4558
4559 static void
4560 intel_dp_power_put(struct intel_dp *dp,
4561                    enum intel_display_power_domain power_domain)
4562 {
4563         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4564         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4565 }
4566
/*
 * Connector ->detect() hook.  Determines whether something is attached
 * (platform-specific probe), then reads OUIs, probes for MST (an MST
 * sink makes this connector look disconnected), caches the EDID and
 * services any pending sink IRQ/test request.  Holds a power-domain
 * reference for the duration of the probe.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Unlike the short-pulse path, run the full test handler. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4638
4639 static void
4640 intel_dp_force(struct drm_connector *connector)
4641 {
4642         struct intel_dp *intel_dp = intel_attached_dp(connector);
4643         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4644         enum intel_display_power_domain power_domain;
4645
4646         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4647                       connector->base.id, connector->name);
4648         intel_dp_unset_edid(intel_dp);
4649
4650         if (connector->status != connector_status_connected)
4651                 return;
4652
4653         power_domain = intel_dp_power_get(intel_dp);
4654
4655         intel_dp_set_edid(intel_dp);
4656
4657         intel_dp_power_put(intel_dp, power_domain);
4658
4659         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4660                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4661 }
4662
4663 static int intel_dp_get_modes(struct drm_connector *connector)
4664 {
4665         struct intel_connector *intel_connector = to_intel_connector(connector);
4666         struct edid *edid;
4667
4668         edid = intel_connector->detect_edid;
4669         if (edid) {
4670                 int ret = intel_connector_update_modes(connector, edid);
4671                 if (ret)
4672                         return ret;
4673         }
4674
4675         /* if eDP has no EDID, fall back to fixed mode */
4676         if (is_edp(intel_attached_dp(connector)) &&
4677             intel_connector->panel.fixed_mode) {
4678                 struct drm_display_mode *mode;
4679
4680                 mode = drm_mode_duplicate(connector->dev,
4681                                           intel_connector->panel.fixed_mode);
4682                 if (mode) {
4683                         drm_mode_probed_add(connector, mode);
4684                         return 1;
4685                 }
4686         }
4687
4688         return 0;
4689 }
4690
4691 static bool
4692 intel_dp_detect_audio(struct drm_connector *connector)
4693 {
4694         bool has_audio = false;
4695         struct edid *edid;
4696
4697         edid = to_intel_connector(connector)->detect_edid;
4698         if (edid)
4699                 has_audio = drm_detect_monitor_audio(edid);
4700
4701         return has_audio;
4702 }
4703
/*
 * ->set_property() hook: handle changes to the connector properties we
 * attach (force-audio, broadcast RGB range, eDP scaling mode).
 *
 * Returns 0 on success, a negative error code from the property core, or
 * -EINVAL for an unknown property or invalid value.  When a value actually
 * changed and a CRTC is bound, the mode is restored to apply it.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value in the property core first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means: take whatever the sink's EDID advertises. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Avoid a pointless modeset when nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* A property actually changed: redo the modeset to apply it. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4791
/*
 * ->destroy() hook: free everything hanging off the connector and the
 * connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may also hold an ERR_PTR, hence the IS_ERR_OR_NULL check. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4810
/*
 * ->destroy() hook for the encoder: unregister the AUX channel, tear down
 * MST state and, for eDP, cancel the deferred VDD-off work and the reboot
 * notifier before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4836
/*
 * Suspend hook: force eDP panel VDD off now, since a deferred VDD-off
 * work item may still be pending.  No-op for non-eDP encoders.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4853
/*
 * If the BIOS left panel VDD enabled, take the power domain reference our
 * state tracking expects and schedule the deferred VDD off so the
 * reference isn't held forever.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to do if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4878
/*
 * ->reset() hook (init/resume): re-sync our eDP power sequencer state
 * with whatever the BIOS left behind.  No-op for non-eDP encoders.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4901
/* Connector vtable for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4913
/* Probe-helper vtable: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4919
/* Encoder vtable for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4924
/*
 * Legacy per-encoder hot-plug callback; intentionally a no-op for DP,
 * as hotplug processing happens in intel_dp_hpd_pulse() instead.
 */
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
}
4930
/*
 * Hotplug IRQ handler for a DP digital port.
 *
 * @long_hpd: true for a long HPD pulse (connect/disconnect, full re-probe
 *            needed), false for a short pulse (link/MST status check).
 *
 * Returns IRQ_HANDLED when the pulse was consumed, IRQ_NONE otherwise.
 * On a failed probe while in MST mode, drops back out of MST mode.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Keep the port powered while we talk to the sink. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* Bail out (possibly leaving MST mode) if nothing is
		 * physically connected anymore. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	/* Balance the intel_display_power_get() above on all exit paths. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5016
5017 /* Return which DP Port should be selected for Transcoder DP control */
5018 int
5019 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5020 {
5021         struct drm_device *dev = crtc->dev;
5022         struct intel_encoder *intel_encoder;
5023         struct intel_dp *intel_dp;
5024
5025         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5026                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5027
5028                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5029                     intel_encoder->type == INTEL_OUTPUT_EDP)
5030                         return intel_dp->output_reg;
5031         }
5032
5033         return -1;
5034 }
5035
/*
 * Check the VBT to see whether the DP connection on the given port is
 * actually an eDP panel.  Port A is always eDP; ports B-D are eDP only
 * if a matching VBT child device says so.
 */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/* Map DP ports to the VBT child-device DVO port codes. */
	static const short port_mapping[] = {
		[PORT_B] = PORT_IDPB,
		[PORT_C] = PORT_IDPC,
		[PORT_D] = PORT_IDPD,
	};

	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		/* Match on port and on the eDP bits of the device type. */
		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5064
/*
 * Attach the DP-specific connector properties: force-audio, broadcast
 * RGB range, and (for eDP) the panel scaling mode.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		/* eDP panels also get a scaling mode, aspect-fit by default. */
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5083
/* Record "now" (jiffies) as the last power-cycle/power-on/backlight-off
 * events, giving the panel power sequencing waits a sane baseline. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5090
/*
 * Compute the final panel power sequencer delays.
 *
 * Reads the current delays from the PPS registers (PCH or VLV per-pipe)
 * and from the VBT, takes the max of the two per field, and falls back to
 * the eDP spec upper limits when both are zero.  Results are stored in
 * intel_dp->pps_delays (hw units of 100us) and the cached ms delay
 * members.  Caller must hold pps_mutex.  No-op if already initialized.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the register set: PCH or the VLV per-pipe PPS instance. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us hw units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5193
/*
 * Program the previously computed PPS delays into the panel power
 * sequencer registers (PCH or VLV per-pipe), including the reference
 * clock divider and, where the hardware has them, the port select bits.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* PPS reference clock in the hw's expected units. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	/* Read back what actually landed in the registers for the log. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
5259
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed (in Hz)
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate means switching to low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen >= 8 (except CHV): switch via the M/N values; older gens
	 * (> 6) toggle the RR mode switch bit in PIPECONF instead. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5363
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one DP connector can own DRRS at a time. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5396
5397 /**
5398  * intel_edp_drrs_disable - Disable DRRS
5399  * @intel_dp: DP struct
5400  *
5401  */
5402 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5403 {
5404         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5405         struct drm_i915_private *dev_priv = dev->dev_private;
5406         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5407         struct drm_crtc *crtc = dig_port->base.base.crtc;
5408         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5409
5410         if (!intel_crtc->config->has_drrs)
5411                 return;
5412
5413         mutex_lock(&dev_priv->drrs.mutex);
5414         if (!dev_priv->drrs.dp) {
5415                 mutex_unlock(&dev_priv->drrs.mutex);
5416                 return;
5417         }
5418
5419         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5420                 intel_dp_set_drrs_state(dev_priv->dev,
5421                         intel_dp->attached_connector->panel.
5422                         fixed_mode->vrefresh);
5423
5424         dev_priv->drrs.dp = NULL;
5425         mutex_unlock(&dev_priv->drrs.mutex);
5426
5427         cancel_delayed_work_sync(&dev_priv->drrs.work);
5428 }
5429
5430 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5431 {
5432         struct drm_i915_private *dev_priv =
5433                 container_of(work, typeof(*dev_priv), drrs.work.work);
5434         struct intel_dp *intel_dp;
5435
5436         mutex_lock(&dev_priv->drrs.mutex);
5437
5438         intel_dp = dev_priv->drrs.dp;
5439
5440         if (!intel_dp)
5441                 goto unlock;
5442
5443         /*
5444          * The delayed work can race with an invalidate hence we need to
5445          * recheck.
5446          */
5447
5448         if (dev_priv->drrs.busy_frontbuffer_bits)
5449                 goto unlock;
5450
5451         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5452                 intel_dp_set_drrs_state(dev_priv->dev,
5453                         intel_dp->attached_connector->panel.
5454                         downclock_mode->vrefresh);
5455
5456 unlock:
5457         mutex_unlock(&dev_priv->drrs.mutex);
5458 }
5459
/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/*
	 * Non-sync cancel: an already-running downclock work item will
	 * serialize against us on drrs.mutex and recheck the busy bits.
	 */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Screen activity: make sure we are back at the high refresh rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);
	}

	/* Only track frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}
5503
5504 /**
5505  * intel_edp_drrs_flush - Flush DRRS
5506  * @dev: DRM device
5507  * @frontbuffer_bits: frontbuffer plane tracking bits
5508  *
5509  * When there is no movement on screen, DRRS work can be scheduled.
5510  * This DRRS work is responsible for setting relevant registers after a
5511  * timeout of 1 second.
5512  *
5513  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5514  */
5515 void intel_edp_drrs_flush(struct drm_device *dev,
5516                 unsigned frontbuffer_bits)
5517 {
5518         struct drm_i915_private *dev_priv = dev->dev_private;
5519         struct drm_crtc *crtc;
5520         enum pipe pipe;
5521
5522         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5523                 return;
5524
5525         cancel_delayed_work(&dev_priv->drrs.work);
5526
5527         mutex_lock(&dev_priv->drrs.mutex);
5528         if (!dev_priv->drrs.dp) {
5529                 mutex_unlock(&dev_priv->drrs.mutex);
5530                 return;
5531         }
5532
5533         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5534         pipe = to_intel_crtc(crtc)->pipe;
5535         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5536
5537         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5538                         !dev_priv->drrs.busy_frontbuffer_bits)
5539                 schedule_delayed_work(&dev_priv->drrs.work,
5540                                 msecs_to_jiffies(1000));
5541         mutex_unlock(&dev_priv->drrs.mutex);
5542 }
5543
5544 /**
5545  * DOC: Display Refresh Rate Switching (DRRS)
5546  *
5547  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5549  * dynamically, based on the usage scenario. This feature is applicable
5550  * for internal panels.
5551  *
5552  * Indication that the panel supports DRRS is given by the panel EDID, which
5553  * would list multiple refresh rates for one resolution.
5554  *
5555  * DRRS is of 2 types - static and seamless.
5556  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5557  * (may appear as a blink on screen) and is used in dock-undock scenario.
5558  * Seamless DRRS involves changing RR without any visual effect to the user
5559  * and can be used during normal system usage. This is done by programming
5560  * certain registers.
5561  *
5562  * Support for static/seamless DRRS may be indicated in the VBT based on
5563  * inputs from the panel spec.
5564  *
5565  * DRRS saves power by switching to low RR based on usage scenarios.
5566  *
5567  * eDP DRRS:-
5568  *        The implementation is based on frontbuffer tracking implementation.
5569  * When there is a disturbance on the screen triggered by user activity or a
5570  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5571  * When there is no movement on screen, after a timeout of 1 second, a switch
5572  * to low RR is made.
5573  *        For integration with frontbuffer tracking code,
5574  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5575  *
5576  * DRRS can be further extended to support other internal panels and also
5577  * the scenario of video playback wherein RR is set based on the rate
5578  * requested by userspace.
5579  */
5580
5581 /**
5582  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5583  * @intel_connector: eDP connector
5584  * @fixed_mode: preferred mode of panel
5585  *
5586  * This function is  called only once at driver load to initialize basic
5587  * DRRS stuff.
5588  *
5589  * Returns:
5590  * Downclock mode if panel supports it, else return NULL.
5591  * DRRS support is determined by the presence of downclock mode (apart
5592  * from VBT setting).
5593  */
5594 static struct drm_display_mode *
5595 intel_dp_drrs_init(struct intel_connector *intel_connector,
5596                 struct drm_display_mode *fixed_mode)
5597 {
5598         struct drm_connector *connector = &intel_connector->base;
5599         struct drm_device *dev = connector->dev;
5600         struct drm_i915_private *dev_priv = dev->dev_private;
5601         struct drm_display_mode *downclock_mode = NULL;
5602
5603         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5604         mutex_init(&dev_priv->drrs.mutex);
5605
5606         if (INTEL_INFO(dev)->gen <= 6) {
5607                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5608                 return NULL;
5609         }
5610
5611         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5612                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5613                 return NULL;
5614         }
5615
5616         downclock_mode = intel_find_panel_downclock
5617                                         (dev, fixed_mode, connector);
5618
5619         if (!downclock_mode) {
5620                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5621                 return NULL;
5622         }
5623
5624         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5625
5626         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5627         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5628         return downclock_mode;
5629 }
5630
/*
 * Probe the eDP-specific state of a connector: sanitize VDD, cache DPCD
 * and EDID, pick the fixed/downclock modes (EDID preferred, VBT as
 * fallback) and set up the backlight. Returns true immediately for
 * non-eDP ports. Returns false when the panel does not answer DPCD
 * reads (a "ghost" eDP), in which case the caller tears the connector
 * down again.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/*
	 * NOTE: the cached pointer may be an ERR_PTR sentinel (-EINVAL /
	 * -ENOENT), not just a valid edid; readers must check accordingly.
	 */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS is only probed against the EDID fixed mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5737
/*
 * Initialize the DRM connector, AUX channel, hotplug pin, PPS and
 * (optionally) MST state for a DP/eDP digital port, then register the
 * connector. Returns false — after unwinding everything set up here —
 * when eDP panel probing fails or eDP is requested on an invalid
 * vlv/chv port; the caller then frees the encoder and connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: per-platform AUX clock divider selection */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	/* Set up PPS before the first AUX transaction on eDP. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* Unwind everything registered above if the eDP panel is a ghost. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
5881
5882 void
5883 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5884 {
5885         struct drm_i915_private *dev_priv = dev->dev_private;
5886         struct intel_digital_port *intel_dig_port;
5887         struct intel_encoder *intel_encoder;
5888         struct drm_encoder *encoder;
5889         struct intel_connector *intel_connector;
5890
5891         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5892         if (!intel_dig_port)
5893                 return;
5894
5895         intel_connector = intel_connector_alloc();
5896         if (!intel_connector) {
5897                 kfree(intel_dig_port);
5898                 return;
5899         }
5900
5901         intel_encoder = &intel_dig_port->base;
5902         encoder = &intel_encoder->base;
5903
5904         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5905                          DRM_MODE_ENCODER_TMDS);
5906
5907         intel_encoder->compute_config = intel_dp_compute_config;
5908         intel_encoder->disable = intel_disable_dp;
5909         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5910         intel_encoder->get_config = intel_dp_get_config;
5911         intel_encoder->suspend = intel_dp_encoder_suspend;
5912         if (IS_CHERRYVIEW(dev)) {
5913                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5914                 intel_encoder->pre_enable = chv_pre_enable_dp;
5915                 intel_encoder->enable = vlv_enable_dp;
5916                 intel_encoder->post_disable = chv_post_disable_dp;
5917         } else if (IS_VALLEYVIEW(dev)) {
5918                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5919                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5920                 intel_encoder->enable = vlv_enable_dp;
5921                 intel_encoder->post_disable = vlv_post_disable_dp;
5922         } else {
5923                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5924                 intel_encoder->enable = g4x_enable_dp;
5925                 if (INTEL_INFO(dev)->gen >= 5)
5926                         intel_encoder->post_disable = ilk_post_disable_dp;
5927         }
5928
5929         intel_dig_port->port = port;
5930         intel_dig_port->dp.output_reg = output_reg;
5931
5932         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5933         if (IS_CHERRYVIEW(dev)) {
5934                 if (port == PORT_D)
5935                         intel_encoder->crtc_mask = 1 << 2;
5936                 else
5937                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5938         } else {
5939                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5940         }
5941         intel_encoder->cloneable = 0;
5942         intel_encoder->hot_plug = intel_dp_hot_plug;
5943
5944         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5945         dev_priv->hpd_irq_port[port] = intel_dig_port;
5946
5947         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5948                 drm_encoder_cleanup(encoder);
5949                 kfree(intel_dig_port);
5950                 kfree(intel_connector);
5951         }
5952 }
5953
5954 void intel_dp_mst_suspend(struct drm_device *dev)
5955 {
5956         struct drm_i915_private *dev_priv = dev->dev_private;
5957         int i;
5958
5959         /* disable MST */
5960         for (i = 0; i < I915_MAX_PORTS; i++) {
5961                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5962                 if (!intel_dig_port)
5963                         continue;
5964
5965                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5966                         if (!intel_dig_port->dp.can_mst)
5967                                 continue;
5968                         if (intel_dig_port->dp.is_mst)
5969                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5970                 }
5971         }
5972 }
5973
5974 void intel_dp_mst_resume(struct drm_device *dev)
5975 {
5976         struct drm_i915_private *dev_priv = dev->dev_private;
5977         int i;
5978
5979         for (i = 0; i < I915_MAX_PORTS; i++) {
5980                 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5981                 if (!intel_dig_port)
5982                         continue;
5983                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5984                         int ret;
5985
5986                         if (!intel_dig_port->dp.can_mst)
5987                                 continue;
5988
5989                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5990                         if (ret != 0) {
5991                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5992                         }
5993                 }
5994         }
5995 }