drm/i915: Handle DP_AUX_I2C_WRITE_STATUS_UPDATE
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Fixed DPLL parameters for the standard DP link rates, one table per
 * platform family.  Entries are looked up by ->clock (link rate in kHz).
 */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below provides only the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates (kHz), per platform family. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
/* Return the intel_dp hanging off @connector's attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132
133 static int
134 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
135 {
136         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
137
138         switch (max_link_bw) {
139         case DP_LINK_BW_1_62:
140         case DP_LINK_BW_2_7:
141         case DP_LINK_BW_5_4:
142                 break;
143         default:
144                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
145                      max_link_bw);
146                 max_link_bw = DP_LINK_BW_1_62;
147                 break;
148         }
149         return max_link_bw;
150 }
151
/*
 * Maximum usable lane count: the lesser of what the source port can
 * drive and what the sink advertises in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/* On DDI, port A without DDI_A_4_LANES set only has 2 lanes. */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
167
168 /*
169  * The units on the numbers in the next two are... bizarre.  Examples will
170  * make it clearer; this one parallels an example in the eDP spec.
171  *
172  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173  *
174  *     270000 * 1 * 8 / 10 == 216000
175  *
176  * The actual data capacity of that configuration is 2.16Gbit/s, so the
177  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
178  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179  * 119000.  At 18bpp that's 2142000 kilobits per second.
180  *
181  * Thus the strange-looking division by 10 in intel_dp_link_required, to
182  * get the result in decakilobits instead of kilobits.
183  */
184
/*
 * Bandwidth (in decakilobits/s) needed for @pixel_clock kHz at @bpp
 * bits per pixel; see the unit discussion in the comment block above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	const int kilobits = pixel_clock * bpp;

	/* Divide by 10 to get decakilobits, rounding up. */
	return (kilobits + 9) / 10;
}
190
/*
 * Usable data rate (decakilobits/s) of a link: 8b/10b coding carries
 * 8 data bits per 10 link bits, hence the * 8 / 10.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_bits = max_link_clock * max_lanes;

	return link_bits * 8 / 10;
}
196
/*
 * Validate @mode against the fixed panel timings (for eDP) and the
 * maximum bandwidth the link can carry at the minimum of 18bpp.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		/* Reject modes larger than the panel's native resolution. */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel is always driven at its native clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Require the mode to fit even at the lowest supported 18bpp. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Double-clocked modes are rejected. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
234
/*
 * Pack up to 4 bytes of AUX payload into a big-endian 32-bit word,
 * as expected by the AUX channel data registers.  Extra input bytes
 * beyond 4 are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);
	return v;
}
246
/*
 * Unpack a big-endian 32-bit AUX data register value into up to 4
 * bytes; requests for more than 4 bytes are clamped.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
255
/*
 * hrawclk is 1/4 the FSB frequency.  Returns the hrawclk rate in MHz,
 * decoded from the CLKCFG register's FSB field.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to a common value. */
		return 133;
	}
}
289
290 static void
291 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
292                                     struct intel_dp *intel_dp);
293 static void
294 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
295                                               struct intel_dp *intel_dp);
296
/*
 * Acquire the panel power sequencer mutex, first taking the port's
 * display power domain reference; see vlv_power_sequencer_reset() for
 * why the reference must be taken outside pps_mutex.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
314
/*
 * Release pps_mutex and drop the power domain reference taken in
 * pps_lock().  Note the put happens after unlocking, the mirror image
 * of pps_lock()'s ordering.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
328
329 static void
330 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
331 {
332         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
333         struct drm_device *dev = intel_dig_port->base.base.dev;
334         struct drm_i915_private *dev_priv = dev->dev_private;
335         enum pipe pipe = intel_dp->pps_pipe;
336         bool pll_enabled;
337         uint32_t DP;
338
339         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
340                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
341                  pipe_name(pipe), port_name(intel_dig_port->port)))
342                 return;
343
344         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
345                       pipe_name(pipe), port_name(intel_dig_port->port));
346
347         /* Preserve the BIOS-computed detected bit. This is
348          * supposed to be read-only.
349          */
350         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
351         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
352         DP |= DP_PORT_WIDTH(1);
353         DP |= DP_LINK_TRAIN_PAT_1;
354
355         if (IS_CHERRYVIEW(dev))
356                 DP |= DP_PIPE_SELECT_CHV(pipe);
357         else if (pipe == PIPE_B)
358                 DP |= DP_PIPEB_SELECT;
359
360         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
361
362         /*
363          * The DPLL for the pipe must be enabled for this to work.
364          * So enable temporarily it if it's not already enabled.
365          */
366         if (!pll_enabled)
367                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
368                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
369
370         /*
371          * Similar magic as in intel_dp_enable_port().
372          * We _must_ do this port enable + disable trick
373          * to make this power seqeuencer lock onto the port.
374          * Otherwise even VDD force bit won't work.
375          */
376         I915_WRITE(intel_dp->output_reg, DP);
377         POSTING_READ(intel_dp->output_reg);
378
379         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
380         POSTING_READ(intel_dp->output_reg);
381
382         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
383         POSTING_READ(intel_dp->output_reg);
384
385         if (!pll_enabled)
386                 vlv_force_pll_off(dev, pipe);
387 }
388
/*
 * Return the pipe whose panel power sequencer this eDP port uses,
 * picking, stealing and initializing a currently-unused one on first
 * use.  Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already assigned: nothing to do. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		/* Mask out PPS instances claimed by other eDP ports. */
		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
452
453 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
454                                enum pipe pipe);
455
456 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
457                                enum pipe pipe)
458 {
459         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
460 }
461
462 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
463                                 enum pipe pipe)
464 {
465         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
466 }
467
/* Wildcard pipe check: accepts any pipe unconditionally. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
473
474 static enum pipe
475 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
476                      enum port port,
477                      vlv_pipe_check pipe_check)
478 {
479         enum pipe pipe;
480
481         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
482                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
483                         PANEL_PORT_SELECT_MASK;
484
485                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
486                         continue;
487
488                 if (!pipe_check(dev_priv, pipe))
489                         continue;
490
491                 return pipe;
492         }
493
494         return INVALID_PIPE;
495 }
496
/*
 * At driver init, adopt whichever PPS instance the BIOS left associated
 * with this port, preferring one that is actively powering the panel.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
533
/*
 * Invalidate the cached pps_pipe of every eDP encoder so a power
 * sequencer gets re-picked and re-initialized on next use.  VLV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
562
563 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
564 {
565         struct drm_device *dev = intel_dp_to_dev(intel_dp);
566
567         if (IS_BROXTON(dev))
568                 return BXT_PP_CONTROL(0);
569         else if (HAS_PCH_SPLIT(dev))
570                 return PCH_PP_CONTROL;
571         else
572                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
573 }
574
575 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
576 {
577         struct drm_device *dev = intel_dp_to_dev(intel_dp);
578
579         if (IS_BROXTON(dev))
580                 return BXT_PP_STATUS(0);
581         else if (HAS_PCH_SPLIT(dev))
582                 return PCH_PP_STATUS;
583         else
584                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
585 }
586
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* Only act for eDP, and only on an actual restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Let the full power cycle delay elapse before rebooting. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
622
/* Is the panel currently powered on (PP_STATUS PP_ON set)?  Needs pps_mutex. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS instance assigned yet on VLV: report not powered. */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
636
/* Is VDD force currently asserted (PP_CONTROL EDP_FORCE_VDD)?  Needs pps_mutex. */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS instance assigned yet on VLV: report VDD off. */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
650
/*
 * Sanity check before AUX channel communication on eDP: warn (and dump
 * the PPS registers) if neither panel power nor VDD is up.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
667
/*
 * Wait for an in-flight AUX transaction to complete (SEND_BUSY to
 * clear), sleeping on the gmbus wait queue when AUX interrupts are
 * available, otherwise polling atomically.  Returns the last value
 * read from the AUX channel control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

	/* C re-reads the control reg and is true once SEND_BUSY has cleared. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
691
692 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693 {
694         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695         struct drm_device *dev = intel_dig_port->base.base.dev;
696
697         /*
698          * The clock divider is based off the hrawclk, and would like to run at
699          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
700          */
701         return index ? 0 : intel_hrawclk(dev) / 2;
702 }
703
/*
 * AUX clock divider for ILK-style platforms: port A (eDP) derives it
 * from cdclk, other ports from the PCH raw clock.  Only index 0 valid.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		/* cdclk_freq is in kHz; divide by 2000 for the divider. */
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
720
/*
 * AUX clock divider for HSW/BDW: port A uses cdclk; on the LPT PCH a
 * pair of workaround values is tried; otherwise the PCH raw clock.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
742
/* VLV uses a fixed AUX clock divider of 100; only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
747
/*
 * SKL doesn't need us to program the AUX clock divider (hardware
 * derives the clock from CDCLK automatically).  We still implement the
 * get_aux_clock_divider vfunc, returning a dummy value for index 0, to
 * plug into the existing code.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
757
/*
 * Compose the AUX_CH_CTL value that fires off a transaction on pre-SKL
 * hardware: busy/done/error bits, optional interrupt enable, timeout,
 * message size, precharge time and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW on AUX channel A uses the longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
787
/*
 * SKL variant of the AUX_CH_CTL composition: no clock divider field;
 * uses a fixed 1600us timeout and a 32-pulse sync sequence instead.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
802
/*
 * Execute one raw AUX channel transaction.
 *
 * @send/@send_bytes: complete message (header + payload) to transmit.
 * @recv/@recv_size:  buffer for the reply bytes.
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY if the channel never goes idle or never signals done, -E2BIG for
 * messages larger than the 5 32-bit data registers can hold, -EIO on a
 * receive error and -ETIMEDOUT when the sink does not reply (typically
 * nothing connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4; /* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Only WARN once per distinct stuck status value, to avoid
		 * spamming the log when the channel is persistently wedged. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's supported AUX clock dividers;
	 * inner loop retries the transaction at each divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	/* Drop VDD only if we turned it on above (i.e. it was not already
	 * enabled by an upper layer). */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
957
958 #define BARE_ADDRESS_SIZE       3
959 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
960 static ssize_t
961 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
962 {
963         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
964         uint8_t txbuf[20], rxbuf[20];
965         size_t txsize, rxsize;
966         int ret;
967
968         txbuf[0] = (msg->request << 4) |
969                 ((msg->address >> 16) & 0xf);
970         txbuf[1] = (msg->address >> 8) & 0xff;
971         txbuf[2] = msg->address & 0xff;
972         txbuf[3] = msg->size - 1;
973
974         switch (msg->request & ~DP_AUX_I2C_MOT) {
975         case DP_AUX_NATIVE_WRITE:
976         case DP_AUX_I2C_WRITE:
977         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
978                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
979                 rxsize = 2; /* 0 or 1 data bytes */
980
981                 if (WARN_ON(txsize > 20))
982                         return -E2BIG;
983
984                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
985
986                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
987                 if (ret > 0) {
988                         msg->reply = rxbuf[0] >> 4;
989
990                         if (ret > 1) {
991                                 /* Number of bytes written in a short write. */
992                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
993                         } else {
994                                 /* Return payload size. */
995                                 ret = msg->size;
996                         }
997                 }
998                 break;
999
1000         case DP_AUX_NATIVE_READ:
1001         case DP_AUX_I2C_READ:
1002                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1003                 rxsize = msg->size + 1;
1004
1005                 if (WARN_ON(rxsize > 20))
1006                         return -E2BIG;
1007
1008                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1009                 if (ret > 0) {
1010                         msg->reply = rxbuf[0] >> 4;
1011                         /*
1012                          * Assume happy day, and copy the data. The caller is
1013                          * expected to check msg->reply before touching it.
1014                          *
1015                          * Return payload size.
1016                          */
1017                         ret--;
1018                         memcpy(msg->buffer, rxbuf + 1, ret);
1019                 }
1020                 break;
1021
1022         default:
1023                 ret = -EINVAL;
1024                 break;
1025         }
1026
1027         return ret;
1028 }
1029
/*
 * Set up the AUX channel control register for this port and register the
 * drm_dp_aux adapter (i2c-over-AUX), including a sysfs link from the
 * connector's device to the i2c adapter. On registration failure the
 * function logs an error and leaves the adapter unregistered.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	/* Default per-port AUX_CTL register and i2c bus name. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the i2c adapter under the connector's sysfs directory;
	 * unwind the aux registration if the link cannot be created. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1121
1122 static void
1123 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1124 {
1125         struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1126
1127         if (!intel_connector->mst_port)
1128                 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1129                                   intel_dp->aux.ddc.dev.kobj.name);
1130         intel_connector_unregister(intel_connector);
1131 }
1132
1133 static void
1134 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1135 {
1136         u32 ctrl1;
1137
1138         memset(&pipe_config->dpll_hw_state, 0,
1139                sizeof(pipe_config->dpll_hw_state));
1140
1141         pipe_config->ddi_pll_sel = SKL_DPLL0;
1142         pipe_config->dpll_hw_state.cfgcr1 = 0;
1143         pipe_config->dpll_hw_state.cfgcr2 = 0;
1144
1145         ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1146         switch (pipe_config->port_clock / 2) {
1147         case 81000:
1148                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1149                                               SKL_DPLL0);
1150                 break;
1151         case 135000:
1152                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1153                                               SKL_DPLL0);
1154                 break;
1155         case 270000:
1156                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1157                                               SKL_DPLL0);
1158                 break;
1159         case 162000:
1160                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1161                                               SKL_DPLL0);
1162                 break;
1163         /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1164         results in CDCLK change. Need to handle the change of CDCLK by
1165         disabling pipes and re-enabling them */
1166         case 108000:
1167                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1168                                               SKL_DPLL0);
1169                 break;
1170         case 216000:
1171                 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1172                                               SKL_DPLL0);
1173                 break;
1174
1175         }
1176         pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1177 }
1178
1179 void
1180 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1181 {
1182         memset(&pipe_config->dpll_hw_state, 0,
1183                sizeof(pipe_config->dpll_hw_state));
1184
1185         switch (pipe_config->port_clock / 2) {
1186         case 81000:
1187                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1188                 break;
1189         case 135000:
1190                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1191                 break;
1192         case 270000:
1193                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1194                 break;
1195         }
1196 }
1197
1198 static int
1199 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1200 {
1201         if (intel_dp->num_sink_rates) {
1202                 *sink_rates = intel_dp->sink_rates;
1203                 return intel_dp->num_sink_rates;
1204         }
1205
1206         *sink_rates = default_rates;
1207
1208         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1209 }
1210
1211 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1212 {
1213         /* WaDisableHBR2:skl */
1214         if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1215                 return false;
1216
1217         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1218             (INTEL_INFO(dev)->gen >= 9))
1219                 return true;
1220         else
1221                 return false;
1222 }
1223
1224 static int
1225 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1226 {
1227         int size;
1228
1229         if (IS_BROXTON(dev)) {
1230                 *source_rates = bxt_rates;
1231                 size = ARRAY_SIZE(bxt_rates);
1232         } else if (IS_SKYLAKE(dev)) {
1233                 *source_rates = skl_rates;
1234                 size = ARRAY_SIZE(skl_rates);
1235         } else {
1236                 *source_rates = default_rates;
1237                 size = ARRAY_SIZE(default_rates);
1238         }
1239
1240         /* This depends on the fact that 5.4 is last value in the array */
1241         if (!intel_dp_source_supports_hbr2(dev))
1242                 size--;
1243
1244         return size;
1245 }
1246
1247 static void
1248 intel_dp_set_clock(struct intel_encoder *encoder,
1249                    struct intel_crtc_state *pipe_config)
1250 {
1251         struct drm_device *dev = encoder->base.dev;
1252         const struct dp_link_dpll *divisor = NULL;
1253         int i, count = 0;
1254
1255         if (IS_G4X(dev)) {
1256                 divisor = gen4_dpll;
1257                 count = ARRAY_SIZE(gen4_dpll);
1258         } else if (HAS_PCH_SPLIT(dev)) {
1259                 divisor = pch_dpll;
1260                 count = ARRAY_SIZE(pch_dpll);
1261         } else if (IS_CHERRYVIEW(dev)) {
1262                 divisor = chv_dpll;
1263                 count = ARRAY_SIZE(chv_dpll);
1264         } else if (IS_VALLEYVIEW(dev)) {
1265                 divisor = vlv_dpll;
1266                 count = ARRAY_SIZE(vlv_dpll);
1267         }
1268
1269         if (divisor && count) {
1270                 for (i = 0; i < count; i++) {
1271                         if (pipe_config->port_clock == divisor[i].clock) {
1272                                 pipe_config->dpll = divisor[i].dpll;
1273                                 pipe_config->clock_set = true;
1274                                 break;
1275                         }
1276                 }
1277         }
1278 }
1279
1280 static int intersect_rates(const int *source_rates, int source_len,
1281                            const int *sink_rates, int sink_len,
1282                            int *common_rates)
1283 {
1284         int i = 0, j = 0, k = 0;
1285
1286         while (i < source_len && j < sink_len) {
1287                 if (source_rates[i] == sink_rates[j]) {
1288                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1289                                 return k;
1290                         common_rates[k] = source_rates[i];
1291                         ++k;
1292                         ++i;
1293                         ++j;
1294                 } else if (source_rates[i] < sink_rates[j]) {
1295                         ++i;
1296                 } else {
1297                         ++j;
1298                 }
1299         }
1300         return k;
1301 }
1302
/*
 * Compute the set of link rates supported by both the source and the
 * attached sink into @common_rates; returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src, *sink;
	int nsrc, nsink;

	nsrc = intel_dp_source_rates(dev, &src);
	nsink = intel_dp_sink_rates(intel_dp, &sink);

	return intersect_rates(src, nsrc, sink, nsink, common_rates);
}
1317
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * silently truncating once the @len-byte buffer is exhausted. The result
 * is always NUL-terminated when @len > 0; a zero-length buffer is left
 * untouched (the old code unconditionally wrote str[0]).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	if (!len)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * snprintf() returns the length the full string would have
		 * needed. Check for error/truncation explicitly rather than
		 * comparing the signed return directly against the unsigned
		 * remaining length (which would promote a negative value to
		 * a huge size_t).
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1333
1334 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1335 {
1336         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1337         const int *source_rates, *sink_rates;
1338         int source_len, sink_len, common_len;
1339         int common_rates[DP_MAX_SUPPORTED_RATES];
1340         char str[128]; /* FIXME: too big for stack? */
1341
1342         if ((drm_debug & DRM_UT_KMS) == 0)
1343                 return;
1344
1345         source_len = intel_dp_source_rates(dev, &source_rates);
1346         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1347         DRM_DEBUG_KMS("source rates: %s\n", str);
1348
1349         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1350         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1351         DRM_DEBUG_KMS("sink rates: %s\n", str);
1352
1353         common_len = intel_dp_common_rates(intel_dp, common_rates);
1354         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1355         DRM_DEBUG_KMS("common rates: %s\n", str);
1356 }
1357
1358 static int rate_to_index(int find, const int *rates)
1359 {
1360         int i = 0;
1361
1362         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1363                 if (find == rates[i])
1364                         break;
1365
1366         return i;
1367 }
1368
/*
 * Return the highest link rate supported by both source and sink, or a
 * safe 162000 (RBR) fallback if there is no overlap.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	/* Zero-initialized so unused tail entries act as a terminator. */
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/*
	 * rate_to_index(0, ...) finds the first zero entry, i.e. the count
	 * of valid rates; since intersect_rates() fills the array in
	 * ascending order, the entry just before it is the maximum.
	 */
	return rates[rate_to_index(0, rates) - 1];
}
1381
/* Return the index of @rate within the sink-provided rate table. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1386
1387 bool
1388 intel_dp_compute_config(struct intel_encoder *encoder,
1389                         struct intel_crtc_state *pipe_config)
1390 {
1391         struct drm_device *dev = encoder->base.dev;
1392         struct drm_i915_private *dev_priv = dev->dev_private;
1393         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1394         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1395         enum port port = dp_to_dig_port(intel_dp)->port;
1396         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1397         struct intel_connector *intel_connector = intel_dp->attached_connector;
1398         int lane_count, clock;
1399         int min_lane_count = 1;
1400         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1401         /* Conveniently, the link BW constants become indices with a shift...*/
1402         int min_clock = 0;
1403         int max_clock;
1404         int bpp, mode_rate;
1405         int link_avail, link_clock;
1406         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1407         int common_len;
1408
1409         common_len = intel_dp_common_rates(intel_dp, common_rates);
1410
1411         /* No common link rates between source and sink */
1412         WARN_ON(common_len <= 0);
1413
1414         max_clock = common_len - 1;
1415
1416         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1417                 pipe_config->has_pch_encoder = true;
1418
1419         pipe_config->has_dp_encoder = true;
1420         pipe_config->has_drrs = false;
1421         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1422
1423         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1424                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1425                                        adjusted_mode);
1426
1427                 if (INTEL_INFO(dev)->gen >= 9) {
1428                         int ret;
1429                         ret = skl_update_scaler_crtc(pipe_config);
1430                         if (ret)
1431                                 return ret;
1432                 }
1433
1434                 if (!HAS_PCH_SPLIT(dev))
1435                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1436                                                  intel_connector->panel.fitting_mode);
1437                 else
1438                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1439                                                 intel_connector->panel.fitting_mode);
1440         }
1441
1442         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1443                 return false;
1444
1445         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1446                       "max bw %d pixel clock %iKHz\n",
1447                       max_lane_count, common_rates[max_clock],
1448                       adjusted_mode->crtc_clock);
1449
1450         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1451          * bpc in between. */
1452         bpp = pipe_config->pipe_bpp;
1453         if (is_edp(intel_dp)) {
1454
1455                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1456                 if (intel_connector->base.display_info.bpc == 0 &&
1457                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1458                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1459                                       dev_priv->vbt.edp_bpp);
1460                         bpp = dev_priv->vbt.edp_bpp;
1461                 }
1462
1463                 /*
1464                  * Use the maximum clock and number of lanes the eDP panel
1465                  * advertizes being capable of. The panels are generally
1466                  * designed to support only a single clock and lane
1467                  * configuration, and typically these values correspond to the
1468                  * native resolution of the panel.
1469                  */
1470                 min_lane_count = max_lane_count;
1471                 min_clock = max_clock;
1472         }
1473
1474         for (; bpp >= 6*3; bpp -= 2*3) {
1475                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1476                                                    bpp);
1477
1478                 for (clock = min_clock; clock <= max_clock; clock++) {
1479                         for (lane_count = min_lane_count;
1480                                 lane_count <= max_lane_count;
1481                                 lane_count <<= 1) {
1482
1483                                 link_clock = common_rates[clock];
1484                                 link_avail = intel_dp_max_data_rate(link_clock,
1485                                                                     lane_count);
1486
1487                                 if (mode_rate <= link_avail) {
1488                                         goto found;
1489                                 }
1490                         }
1491                 }
1492         }
1493
1494         return false;
1495
1496 found:
1497         if (intel_dp->color_range_auto) {
1498                 /*
1499                  * See:
1500                  * CEA-861-E - 5.1 Default Encoding Parameters
1501                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1502                  */
1503                 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1504                         intel_dp->color_range = DP_COLOR_RANGE_16_235;
1505                 else
1506                         intel_dp->color_range = 0;
1507         }
1508
1509         if (intel_dp->color_range)
1510                 pipe_config->limited_color_range = true;
1511
1512         intel_dp->lane_count = lane_count;
1513
1514         if (intel_dp->num_sink_rates) {
1515                 intel_dp->link_bw = 0;
1516                 intel_dp->rate_select =
1517                         intel_dp_rate_select(intel_dp, common_rates[clock]);
1518         } else {
1519                 intel_dp->link_bw =
1520                         drm_dp_link_rate_to_bw_code(common_rates[clock]);
1521                 intel_dp->rate_select = 0;
1522         }
1523
1524         pipe_config->pipe_bpp = bpp;
1525         pipe_config->port_clock = common_rates[clock];
1526
1527         DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1528                       intel_dp->link_bw, intel_dp->lane_count,
1529                       pipe_config->port_clock, bpp);
1530         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1531                       mode_rate, link_avail);
1532
1533         intel_link_compute_m_n(bpp, lane_count,
1534                                adjusted_mode->crtc_clock,
1535                                pipe_config->port_clock,
1536                                &pipe_config->dp_m_n);
1537
1538         if (intel_connector->panel.downclock_mode != NULL &&
1539                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1540                         pipe_config->has_drrs = true;
1541                         intel_link_compute_m_n(bpp, lane_count,
1542                                 intel_connector->panel.downclock_mode->clock,
1543                                 pipe_config->port_clock,
1544                                 &pipe_config->dp_m2_n2);
1545         }
1546
1547         if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1548                 skl_edp_set_pll_config(pipe_config);
1549         else if (IS_BROXTON(dev))
1550                 /* handled in ddi */;
1551         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1552                 hsw_dp_set_ddi_pll_sel(pipe_config);
1553         else
1554                 intel_dp_set_clock(encoder, pipe_config);
1555
1556         return true;
1557 }
1558
/*
 * Program the CPU eDP PLL frequency (DP_A register) for the current crtc
 * port clock, mirroring the chosen bits into intel_dp->DP.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Let the PLL settle before the port is enabled. */
	udelay(500);
}
1589
/*
 * Build the value for the DP port register in intel_dp->DP ahead of port
 * enable.  Only TRANS_DP_CTL (CPT PCH) is actually written to hardware
 * here; the port register itself is written later from intel_dp->DP.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 CPU eDP: CPT-style link-train bits, pipe select at bit 29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT the enhanced framing bit lives in TRANS_DP_CTL */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / SNB CPU / VLV / CHV layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1671
/*
 * Mask/value pairs for wait_panel_status(): the PP_STATUS bits that must
 * match once the panel power sequencer reports fully on, fully off, or
 * idle after its power-cycle delay.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1680
1681 static void wait_panel_status(struct intel_dp *intel_dp,
1682                                        u32 mask,
1683                                        u32 value)
1684 {
1685         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1686         struct drm_i915_private *dev_priv = dev->dev_private;
1687         u32 pp_stat_reg, pp_ctrl_reg;
1688
1689         lockdep_assert_held(&dev_priv->pps_mutex);
1690
1691         pp_stat_reg = _pp_stat_reg(intel_dp);
1692         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1693
1694         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1695                         mask, value,
1696                         I915_READ(pp_stat_reg),
1697                         I915_READ(pp_ctrl_reg));
1698
1699         if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1700                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1701                                 I915_READ(pp_stat_reg),
1702                                 I915_READ(pp_ctrl_reg));
1703         }
1704
1705         DRM_DEBUG_KMS("Wait complete\n");
1706 }
1707
/* Block until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1713
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1719
/*
 * Block until both the software-tracked power-cycle delay (measured from
 * last_power_cycle) and the hardware sequencer's cycle-delay state have
 * elapsed, so the panel may be powered up again.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1731
/* Honour the panel's power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1737
/* Honour the panel's backlight-off -> power-down delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1743
1744 /* Read the current pp_control value, unlocking the register if it
1745  * is locked
1746  */
1747
1748 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1749 {
1750         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1751         struct drm_i915_private *dev_priv = dev->dev_private;
1752         u32 control;
1753
1754         lockdep_assert_held(&dev_priv->pps_mutex);
1755
1756         control = I915_READ(_pp_ctrl_reg(intel_dp));
1757         if (!IS_BROXTON(dev)) {
1758                 control &= ~PANEL_UNLOCK_MASK;
1759                 control |= PANEL_UNLOCK_REGS;
1760         }
1761         return control;
1762 }
1763
1764 /*
1765  * Must be paired with edp_panel_vdd_off().
1766  * Must hold pps_mutex around the whole on/off sequence.
1767  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1768  */
1769 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1770 {
1771         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1772         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1773         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1774         struct drm_i915_private *dev_priv = dev->dev_private;
1775         enum intel_display_power_domain power_domain;
1776         u32 pp;
1777         u32 pp_stat_reg, pp_ctrl_reg;
1778         bool need_to_disable = !intel_dp->want_panel_vdd;
1779
1780         lockdep_assert_held(&dev_priv->pps_mutex);
1781
1782         if (!is_edp(intel_dp))
1783                 return false;
1784
1785         cancel_delayed_work(&intel_dp->panel_vdd_work);
1786         intel_dp->want_panel_vdd = true;
1787
1788         if (edp_have_panel_vdd(intel_dp))
1789                 return need_to_disable;
1790
1791         power_domain = intel_display_port_power_domain(intel_encoder);
1792         intel_display_power_get(dev_priv, power_domain);
1793
1794         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1795                       port_name(intel_dig_port->port));
1796
1797         if (!edp_have_panel_power(intel_dp))
1798                 wait_panel_power_cycle(intel_dp);
1799
1800         pp = ironlake_get_pp_control(intel_dp);
1801         pp |= EDP_FORCE_VDD;
1802
1803         pp_stat_reg = _pp_stat_reg(intel_dp);
1804         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1805
1806         I915_WRITE(pp_ctrl_reg, pp);
1807         POSTING_READ(pp_ctrl_reg);
1808         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1809                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1810         /*
1811          * If the panel wasn't on, delay before accessing aux channel
1812          */
1813         if (!edp_have_panel_power(intel_dp)) {
1814                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1815                               port_name(intel_dig_port->port));
1816                 msleep(intel_dp->panel_power_up_delay);
1817         }
1818
1819         return need_to_disable;
1820 }
1821
1822 /*
1823  * Must be paired with intel_edp_panel_vdd_off() or
1824  * intel_edp_panel_off().
1825  * Nested calls to these functions are not allowed since
1826  * we drop the lock. Caller must use some higher level
1827  * locking to prevent nested calls from other threads.
1828  */
1829 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1830 {
1831         bool vdd;
1832
1833         if (!is_edp(intel_dp))
1834                 return;
1835
1836         pps_lock(intel_dp);
1837         vdd = edp_panel_vdd_on(intel_dp);
1838         pps_unlock(intel_dp);
1839
1840         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1841              port_name(dp_to_dig_port(intel_dp)->port));
1842 }
1843
/*
 * Immediately turn off the VDD override and drop the power domain
 * reference taken in edp_panel_vdd_on().  No-op when VDD is already off.
 * Caller must hold pps_mutex and must no longer want VDD.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with the panel off starts a power cycle */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1884
/*
 * Delayed-work callback: turn VDD off unless someone requested it again
 * in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1895
/* Schedule the deferred VDD off via panel_vdd_work. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1908
1909 /*
1910  * Must be paired with edp_panel_vdd_on().
1911  * Must hold pps_mutex around the whole on/off sequence.
1912  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1913  */
1914 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1915 {
1916         struct drm_i915_private *dev_priv =
1917                 intel_dp_to_dev(intel_dp)->dev_private;
1918
1919         lockdep_assert_held(&dev_priv->pps_mutex);
1920
1921         if (!is_edp(intel_dp))
1922                 return;
1923
1924         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1925              port_name(dp_to_dig_port(intel_dp)->port));
1926
1927         intel_dp->want_panel_vdd = false;
1928
1929         if (sync)
1930                 edp_panel_vdd_off_sync(intel_dp);
1931         else
1932                 edp_panel_vdd_schedule_off(intel_dp);
1933 }
1934
/*
 * Run the panel power-up sequence: wait out any pending power cycle,
 * set POWER_TARGET_ON and wait for the sequencer to report the panel on.
 * Caller must hold pps_mutex.  No-op for non-eDP or if already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the T8 backlight-on delay (see wait_backlight_on) */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1982
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1992
1993
/*
 * Run the panel power-down sequence and drop the power domain reference
 * that was taken when VDD was enabled.  Caller must hold pps_mutex and
 * must have VDD forced on (warned otherwise).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is dropped as part of this write, so stop tracking it */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the panel power-cycle delay on the next power-up */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2035
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2045
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set EDP_BLC_ENABLE in PP_CONTROL under the pps lock */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2075
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control enable bit */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2087
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear EDP_BLC_ENABLE in PP_CONTROL under the pps lock */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the time and honour the T9 delay before panel power-down */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2114
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Panel power control bit first, then the PWM (reverse of enable) */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2126
2127 /*
2128  * Hook for controlling the panel power control backlight through the bl_power
2129  * sysfs attribute. Take care to handle multiple calls.
2130  */
2131 static void intel_edp_backlight_power(struct intel_connector *connector,
2132                                       bool enable)
2133 {
2134         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2135         bool is_enabled;
2136
2137         pps_lock(intel_dp);
2138         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2139         pps_unlock(intel_dp);
2140
2141         if (is_enabled == enable)
2142                 return;
2143
2144         DRM_DEBUG_KMS("panel power control backlight %s\n",
2145                       enable ? "enable" : "disable");
2146
2147         if (enable)
2148                 _intel_edp_backlight_on(intel_dp);
2149         else
2150                 _intel_edp_backlight_off(intel_dp);
2151 }
2152
/*
 * Enable the CPU eDP PLL (DP_A register).  The pipe must be disabled and
 * both the PLL and the port must currently be off (warned otherwise).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Let the PLL settle before the port is enabled */
	udelay(200);
}
2178
/*
 * Disable the CPU eDP PLL (DP_A register).  The pipe and port must
 * already be off; the PLL must currently be on (warned otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2203
2204 /* If the sink supports it, try to set the power state appropriately */
2205 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2206 {
2207         int ret, i;
2208
2209         /* Should have a valid DPCD by this point */
2210         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2211                 return;
2212
2213         if (mode != DRM_MODE_DPMS_ON) {
2214                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2215                                          DP_SET_POWER_D3);
2216         } else {
2217                 /*
2218                  * When turning on, we need to retry for 1ms to give the sink
2219                  * time to wake up.
2220                  */
2221                 for (i = 0; i < 3; i++) {
2222                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2223                                                  DP_SET_POWER_D0);
2224                         if (ret == 1)
2225                                 break;
2226                         msleep(1);
2227                 }
2228         }
2229
2230         if (ret != 1)
2231                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2232                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2233 }
2234
/*
 * Hardware state readout: returns true if the DP port is enabled, and
 * reports which pipe feeds it via *pipe.
 *
 * NOTE(review): in the CPT branch, if no transcoder maps to this port we
 * still return true with *pipe left unwritten, so the caller may read an
 * indeterminate pipe value -- confirm against callers.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe mapping lives in the transcoder registers */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2277
/*
 * Hardware state readout: fill pipe_config (sync flags, audio, color
 * range, m/n values, port and dot clocks) from the DP port and
 * transcoder registers.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg otherwise */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: read the link rate back from the DP_A PLL frequency field */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2361
/*
 * Encoder disable hook: shut down audio/PSR, run the eDP panel-off
 * sequence (backlight -> sink DPMS off -> panel power), and on gen < 5
 * take the link down before the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2385
/*
 * ILK+ post-disable: take the link down and, for CPU eDP (port A),
 * disable the eDP PLL.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2395
/* VLV post-disable: just take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2402
/*
 * CHV post-disable: take the link down, then reset the PHY data lanes
 * through the DPIO sideband (soft reset propagated to both PCS pairs).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	/* Sideband accesses must be serialized */
	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	/* Assert lane reset on both lane pairs */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2438
/*
 * Program the requested link training pattern (and scrambling state) into
 * the source-side hardware.
 *
 * @DP: in/out copy of the port register value; for non-DDI platforms only
 *      the training-pattern bits in *DP are updated here -- the caller is
 *      responsible for actually writing the register.
 * @dp_train_pat: DP_TRAINING_PATTERN_* possibly OR'd with
 *      DP_LINK_SCRAMBLING_DISABLE.
 *
 * Three hardware generations are handled:
 *  - DDI (HAS_DDI): pattern lives in DP_TP_CTL, written here directly;
 *  - CPT-style ports (and gen7 port A): *_CPT bit layout in *DP;
 *  - everything else: legacy bit layout in *DP (CHV has a wider mask).
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No PAT3 on this hardware; fall back to PAT2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports PAT3 among the legacy ports. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2522
/*
 * Turn the DP port on: first write the port register configured for
 * training pattern 1 but with the port still disabled, then set
 * DP_PORT_EN with a second write. The two-write sequence is required on
 * VLV/CHV (see comment below) and is harmless elsewhere.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2546
/*
 * Common DP enable path: bring up the port, run the eDP panel power
 * sequence, train the link, and finally enable audio if configured.
 * The panel-power steps are no-ops for non-eDP sinks.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	/*
	 * NOTE(review): lane_mask is always passed as 0 to
	 * vlv_wait_port_ready() here -- presumably meaning "no per-lane
	 * wait"; confirm against that function before changing.
	 */
	unsigned int lane_mask = 0x0;

	/* The port must not already be enabled when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	/* Port enable and the panel power sequence run under the PPS lock. */
	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* VDD on -> panel on -> VDD off (panel now self-sustains power). */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	/* Wake the sink, then run the full link training sequence. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2587
2588 static void g4x_enable_dp(struct intel_encoder *encoder)
2589 {
2590         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2591
2592         intel_enable_dp(encoder);
2593         intel_edp_backlight_on(intel_dp);
2594 }
2595
2596 static void vlv_enable_dp(struct intel_encoder *encoder)
2597 {
2598         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2599
2600         intel_edp_backlight_on(intel_dp);
2601         intel_psr_enable(intel_dp);
2602 }
2603
2604 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2605 {
2606         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2607         struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2608
2609         intel_dp_prepare(encoder);
2610
2611         /* Only ilk+ has port A */
2612         if (dport->port == PORT_A) {
2613                 ironlake_set_pll_cpu_edp(intel_dp);
2614                 ironlake_edp_pll_on(intel_dp);
2615         }
2616 }
2617
/*
 * Logically disconnect the power sequencer currently assigned to this
 * eDP port: turn off any VDD we still hold, clear the PPS port-select
 * register, and mark intel_dp as having no PPS pipe.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* Drop VDD synchronously while we still own this sequencer. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2643
/*
 * Detach the power sequencer of @pipe from whichever eDP port currently
 * owns it, so the caller can claim it. Warns if the previous owner is
 * still driving an active CRTC, since stealing it then is a real problem.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have power sequencers on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP encoders can own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2680
/*
 * Bind the power sequencer of the pipe this eDP port is about to use:
 * release any sequencer this port held before, steal the target pipe's
 * sequencer from its current owner if necessary, then (re)initialize the
 * PPS registers for this port. No-op for non-eDP or if already bound.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right sequencer -- nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2721
/*
 * VLV encoder pre-enable: program the PHY PCS registers for this channel
 * over the DPIO sideband, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read back here is discarded by the
	 * "val = 0" on the next line, which also makes the
	 * "val &= ~(1<<21)" branch below a no-op (val is already 0).
	 * Presumably only the read itself (if it has a side effect)
	 * matters -- confirm before simplifying.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2750
/*
 * VLV pre-PLL-enable hook: prepare the port register state, then reset
 * the PHY Tx lanes and apply the inter-pair skew workaround over DPIO
 * before the PLL comes up.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2780
/*
 * CHV encoder pre-enable: program the PHY for this channel over DPIO --
 * TX FIFO reset source, lane reset deassertion, per-lane latency ("upar")
 * settings, and clock-rate-dependent lane stagger -- then run the common
 * DP enable sequence.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset*/
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < 4; i++) {
		/* Set the upar bit */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	/*
	 * NOTE(review): DW11 gets the full TX2 stagger *mask* OR'd in,
	 * not the computed stagger value (which is only applied to DW12
	 * below via DPIO_LANESTAGGER_STRAP) -- confirm this is intended.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2868
/*
 * CHV pre-PLL-enable hook: prepare the port register state, then program
 * the PHY clock distribution (left/right buffer enables) and clock
 * channel usage for this channel over DPIO.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2934
2935 /*
2936  * Native read with retry for link status and receiver capability reads for
2937  * cases where the sink may still be asleep.
2938  *
2939  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2940  * supposed to retry 3 times per the spec.
2941  */
2942 static ssize_t
2943 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2944                         void *buffer, size_t size)
2945 {
2946         ssize_t ret;
2947         int i;
2948
2949         /*
2950          * Sometime we just get the same incorrect byte repeated
2951          * over the entire buffer. Doing just one throw away read
2952          * initially seems to "solve" it.
2953          */
2954         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2955
2956         for (i = 0; i < 3; i++) {
2957                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2958                 if (ret == size)
2959                         return ret;
2960                 msleep(1);
2961         }
2962
2963         return ret;
2964 }
2965
2966 /*
2967  * Fetch AUX CH registers 0x202 - 0x207 which contain
2968  * link status information
2969  */
2970 static bool
2971 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2972 {
2973         return intel_dp_dpcd_read_wake(&intel_dp->aux,
2974                                        DP_LANE0_1_STATUS,
2975                                        link_status,
2976                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2977 }
2978
2979 /* These are source-specific values. */
2980 static uint8_t
2981 intel_dp_voltage_max(struct intel_dp *intel_dp)
2982 {
2983         struct drm_device *dev = intel_dp_to_dev(intel_dp);
2984         struct drm_i915_private *dev_priv = dev->dev_private;
2985         enum port port = dp_to_dig_port(intel_dp)->port;
2986
2987         if (IS_BROXTON(dev))
2988                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2989         else if (INTEL_INFO(dev)->gen >= 9) {
2990                 if (dev_priv->edp_low_vswing && port == PORT_A)
2991                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2992                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2993         } else if (IS_VALLEYVIEW(dev))
2994                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2995         else if (IS_GEN7(dev) && port == PORT_A)
2996                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2997         else if (HAS_PCH_CPT(dev) && port != PORT_A)
2998                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2999         else
3000                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3001 }
3002
3003 static uint8_t
3004 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3005 {
3006         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3007         enum port port = dp_to_dig_port(intel_dp)->port;
3008
3009         if (INTEL_INFO(dev)->gen >= 9) {
3010                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3011                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3012                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3013                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3014                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3015                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3016                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3017                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3018                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3019                 default:
3020                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3021                 }
3022         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3023                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3024                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3025                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3026                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3027                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3028                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3029                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3030                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3031                 default:
3032                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3033                 }
3034         } else if (IS_VALLEYVIEW(dev)) {
3035                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3036                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3037                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3038                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3039                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3040                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3041                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3042                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3043                 default:
3044                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3045                 }
3046         } else if (IS_GEN7(dev) && port == PORT_A) {
3047                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3048                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3049                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3050                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3051                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3052                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3053                 default:
3054                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3055                 }
3056         } else {
3057                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3058                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3059                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3060                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3061                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3062                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3063                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3064                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3065                 default:
3066                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3067                 }
3068         }
3069 }
3070
/*
 * Program the VLV PHY swing/pre-emphasis registers from train_set[0]
 * via DPIO. The register values are a fixed per-(pre-emphasis, swing)
 * table of magic numbers; unsupported combinations program nothing.
 * Always returns 0 -- presumably the port register carries no
 * signal-level bits on VLV, only DPIO does (confirm against caller).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* TX_DW5 is written 0 first and 0x80000000 last to latch the rest. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3170
/*
 * Program the CHV PHY (via sideband DPIO) for the voltage swing and
 * pre-emphasis requested in intel_dp->train_set[0].
 *
 * The requested (pre-emphasis, swing) pair is first translated into a
 * de-emphasis code and a swing-margin code; unsupported combinations
 * return 0 without touching any registers.  The register sequence below
 * (clear calc init, program per-lane swing/deemph, then start the swing
 * calculation) is order-dependent and runs under sb_lock.
 *
 * Always returns 0; the caller ORs the result into the port register,
 * so on CHV no bits are contributed there.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Translate the DPCD request into PHY de-emphasis/margin codes. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/*
	 * Unique transition scale is only enabled for the single
	 * combination of no pre-emphasis + maximum (level 3) swing.
	 */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3344
3345 static void
3346 intel_get_adjust_train(struct intel_dp *intel_dp,
3347                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
3348 {
3349         uint8_t v = 0;
3350         uint8_t p = 0;
3351         int lane;
3352         uint8_t voltage_max;
3353         uint8_t preemph_max;
3354
3355         for (lane = 0; lane < intel_dp->lane_count; lane++) {
3356                 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3357                 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3358
3359                 if (this_v > v)
3360                         v = this_v;
3361                 if (this_p > p)
3362                         p = this_p;
3363         }
3364
3365         voltage_max = intel_dp_voltage_max(intel_dp);
3366         if (v >= voltage_max)
3367                 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3368
3369         preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3370         if (p >= preemph_max)
3371                 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3372
3373         for (lane = 0; lane < 4; lane++)
3374                 intel_dp->train_set[lane] = v | p;
3375 }
3376
3377 static uint32_t
3378 gen4_signal_levels(uint8_t train_set)
3379 {
3380         uint32_t        signal_levels = 0;
3381
3382         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3383         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3384         default:
3385                 signal_levels |= DP_VOLTAGE_0_4;
3386                 break;
3387         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3388                 signal_levels |= DP_VOLTAGE_0_6;
3389                 break;
3390         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3391                 signal_levels |= DP_VOLTAGE_0_8;
3392                 break;
3393         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3394                 signal_levels |= DP_VOLTAGE_1_2;
3395                 break;
3396         }
3397         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3398         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3399         default:
3400                 signal_levels |= DP_PRE_EMPHASIS_0;
3401                 break;
3402         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3403                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3404                 break;
3405         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3406                 signal_levels |= DP_PRE_EMPHASIS_6;
3407                 break;
3408         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3409                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3410                 break;
3411         }
3412         return signal_levels;
3413 }
3414
3415 /* Gen6's DP voltage swing and pre-emphasis control */
3416 static uint32_t
3417 gen6_edp_signal_levels(uint8_t train_set)
3418 {
3419         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3420                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3421         switch (signal_levels) {
3422         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3423         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3424                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3425         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3426                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3427         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3428         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3429                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3430         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3431         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3432                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3433         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3436         default:
3437                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3438                               "0x%x\n", signal_levels);
3439                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3440         }
3441 }
3442
3443 /* Gen7's DP voltage swing and pre-emphasis control */
3444 static uint32_t
3445 gen7_edp_signal_levels(uint8_t train_set)
3446 {
3447         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3448                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3449         switch (signal_levels) {
3450         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3451                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3452         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3453                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3454         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3455                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3456
3457         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3458                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3459         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3460                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3461
3462         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3463                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3464         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3465                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3466
3467         default:
3468                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3469                               "0x%x\n", signal_levels);
3470                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3471         }
3472 }
3473
3474 /* Properly updates "DP" with the correct signal levels. */
3475 static void
3476 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3477 {
3478         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3479         enum port port = intel_dig_port->port;
3480         struct drm_device *dev = intel_dig_port->base.base.dev;
3481         uint32_t signal_levels, mask = 0;
3482         uint8_t train_set = intel_dp->train_set[0];
3483
3484         if (HAS_DDI(dev)) {
3485                 signal_levels = ddi_signal_levels(intel_dp);
3486
3487                 if (IS_BROXTON(dev))
3488                         signal_levels = 0;
3489                 else
3490                         mask = DDI_BUF_EMP_MASK;
3491         } else if (IS_CHERRYVIEW(dev)) {
3492                 signal_levels = chv_signal_levels(intel_dp);
3493         } else if (IS_VALLEYVIEW(dev)) {
3494                 signal_levels = vlv_signal_levels(intel_dp);
3495         } else if (IS_GEN7(dev) && port == PORT_A) {
3496                 signal_levels = gen7_edp_signal_levels(train_set);
3497                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3498         } else if (IS_GEN6(dev) && port == PORT_A) {
3499                 signal_levels = gen6_edp_signal_levels(train_set);
3500                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3501         } else {
3502                 signal_levels = gen4_signal_levels(train_set);
3503                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3504         }
3505
3506         if (mask)
3507                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3508
3509         DRM_DEBUG_KMS("Using vswing level %d\n",
3510                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3511         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3512                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3513                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3514
3515         *DP = (*DP & ~mask) | signal_levels;
3516 }
3517
3518 static bool
3519 intel_dp_set_link_train(struct intel_dp *intel_dp,
3520                         uint32_t *DP,
3521                         uint8_t dp_train_pat)
3522 {
3523         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3524         struct drm_device *dev = intel_dig_port->base.base.dev;
3525         struct drm_i915_private *dev_priv = dev->dev_private;
3526         uint8_t buf[sizeof(intel_dp->train_set) + 1];
3527         int ret, len;
3528
3529         _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3530
3531         I915_WRITE(intel_dp->output_reg, *DP);
3532         POSTING_READ(intel_dp->output_reg);
3533
3534         buf[0] = dp_train_pat;
3535         if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3536             DP_TRAINING_PATTERN_DISABLE) {
3537                 /* don't write DP_TRAINING_LANEx_SET on disable */
3538                 len = 1;
3539         } else {
3540                 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3541                 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3542                 len = intel_dp->lane_count + 1;
3543         }
3544
3545         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3546                                 buf, len);
3547
3548         return ret == len;
3549 }
3550
3551 static bool
3552 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3553                         uint8_t dp_train_pat)
3554 {
3555         if (!intel_dp->train_set_valid)
3556                 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3557         intel_dp_set_signal_levels(intel_dp, DP);
3558         return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3559 }
3560
3561 static bool
3562 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3563                            const uint8_t link_status[DP_LINK_STATUS_SIZE])
3564 {
3565         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3566         struct drm_device *dev = intel_dig_port->base.base.dev;
3567         struct drm_i915_private *dev_priv = dev->dev_private;
3568         int ret;
3569
3570         intel_get_adjust_train(intel_dp, link_status);
3571         intel_dp_set_signal_levels(intel_dp, DP);
3572
3573         I915_WRITE(intel_dp->output_reg, *DP);
3574         POSTING_READ(intel_dp->output_reg);
3575
3576         ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3577                                 intel_dp->train_set, intel_dp->lane_count);
3578
3579         return ret == intel_dp->lane_count;
3580 }
3581
/*
 * Switch a DDI port into idle-pattern transmission after training.
 * No-op on non-DDI platforms.  Except on PORT_A, waits (1ms) for the
 * hardware to report that the idle pattern has been sent.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Request idle link-training mode in DP_TP_CTL. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3612
/*
 * Enable corresponding port and start training pattern 1 (the clock
 * recovery phase).  Writes the link configuration (bandwidth, lane
 * count, optional link-rate select, 8b/10b coding) to the sink, then
 * iterates: read link status, and either finish (clock recovery OK),
 * adjust drive settings as requested by the sink, or reset/give up
 * according to the DP retry rules (5 tries at max swing, 5 tries at
 * the same voltage).
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				&intel_dp->rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff can never match a swing level, so the first pass resets
	 * voltage_tries rather than counting a retry. */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes are at max swing: restart from zero,
			 * up to 5 full restarts. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	/* Preserve the final port register value for later phases. */
	intel_dp->DP = DP;
}
3723
/*
 * Channel equalization phase of link training.  Runs training pattern 2
 * (or 3 for HBR2 / TPS3-capable sinks) and loops: re-verify clock
 * recovery (falling back to intel_dp_start_link_train() if it was
 * lost), check for EQ done, and otherwise apply the sink's adjustment
 * requests — up to 5 EQ attempts per clock-recovery pass and 5
 * clock-recovery passes overall.  On success, marks train_set_valid so
 * a later retrain can reuse the settings.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3806
/*
 * End link training by writing DP_TRAINING_PATTERN_DISABLE (on disable
 * intel_dp_set_link_train() skips the DP_TRAINING_LANEx_SET bytes).
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3812
/*
 * Take the (non-DDI) DP port down: switch the link to the idle training
 * pattern, then clear DP_PORT_EN and audio, apply the IBX transcoder-A
 * workaround if needed, and wait out the panel power-down delay.
 * WARNs and bails if called on a DDI platform or an already-off port.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the platform-specific idle-pattern encoding. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and audio output). */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3868
/*
 * Read and cache the sink's DPCD receiver capabilities, and derive from
 * them: eDP PSR/PSR2 support, TPS3 usability, intermediate (eDP 1.4)
 * link rates and downstream (branch) port info.
 *
 * Returns true if a sink with a non-zero DPCD revision was read, false
 * on AUX failure or when no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	/* Receiver capability field starts at DPCD address 0x000. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	/* A revision of zero means the read produced no real DPCD data. */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sources. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
	 * have support for TP3 hence that check is used along with dpcd check
	 * to ensure TP3 can be enabled.
	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
	 * supported but still not enabled.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    intel_dp_source_supports_hbr2(dev)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table ends at the first zero entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	/* Cache the per-port capabilities of the branch device. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3968
3969 static void
3970 intel_dp_probe_oui(struct intel_dp *intel_dp)
3971 {
3972         u8 buf[3];
3973
3974         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3975                 return;
3976
3977         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3978                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3979                               buf[0], buf[1], buf[2]);
3980
3981         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3982                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3983                               buf[0], buf[1], buf[2]);
3984 }
3985
3986 static bool
3987 intel_dp_probe_mst(struct intel_dp *intel_dp)
3988 {
3989         u8 buf[1];
3990
3991         if (!intel_dp->can_mst)
3992                 return false;
3993
3994         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3995                 return false;
3996
3997         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3998                 if (buf[0] & DP_MST_CAP) {
3999                         DRM_DEBUG_KMS("Sink is MST capable\n");
4000                         intel_dp->is_mst = true;
4001                 } else {
4002                         DRM_DEBUG_KMS("Sink is not MST capable\n");
4003                         intel_dp->is_mst = false;
4004                 }
4005         }
4006
4007         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4008         return intel_dp->is_mst;
4009 }
4010
4011 static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4012 {
4013         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4014         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4015         u8 buf;
4016
4017         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4018                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4019                 return;
4020         }
4021
4022         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4023                                buf & ~DP_TEST_SINK_START) < 0)
4024                 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4025
4026         hsw_enable_ips(intel_crtc);
4027 }
4028
4029 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4030 {
4031         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4032         struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4033         u8 buf;
4034
4035         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4036                 return -EIO;
4037
4038         if (!(buf & DP_TEST_CRC_SUPPORTED))
4039                 return -ENOTTY;
4040
4041         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4042                 return -EIO;
4043
4044         hsw_disable_ips(intel_crtc);
4045
4046         if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4047                                buf | DP_TEST_SINK_START) < 0) {
4048                 hsw_enable_ips(intel_crtc);
4049                 return -EIO;
4050         }
4051
4052         return 0;
4053 }
4054
/*
 * Read a frame CRC from the sink: start CRC generation, wait (up to 6
 * vblanks) for DP_TEST_COUNT to advance past its initial value, then
 * read the 6 CRC bytes into @crc.
 *
 * Returns 0 on success; -EIO on AUX failure, -ETIMEDOUT if the sink
 * never produced a new CRC, or the error from starting CRC generation.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int test_crc_count;
	int attempts = 6;
	int ret;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Snapshot the CRC counter so we can detect when it changes. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
		ret = -EIO;
		goto stop;
	}

	test_crc_count = buf & DP_TEST_COUNT_MASK;

	/* Poll once per vblank until the counter moves or we give up. */
	do {
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		ret = -EIO;
stop:
	/* Always stop CRC generation, success or not. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4097
4098 static bool
4099 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4100 {
4101         return intel_dp_dpcd_read_wake(&intel_dp->aux,
4102                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
4103                                        sink_irq_vector, 1) == 1;
4104 }
4105
4106 static bool
4107 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108 {
4109         int ret;
4110
4111         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4112                                              DP_SINK_COUNT_ESI,
4113                                              sink_irq_vector, 14);
4114         if (ret != 14)
4115                 return false;
4116
4117         return true;
4118 }
4119
4120 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4121 {
4122         uint8_t test_result = DP_TEST_ACK;
4123         return test_result;
4124 }
4125
4126 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4127 {
4128         uint8_t test_result = DP_TEST_NAK;
4129         return test_result;
4130 }
4131
4132 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4133 {
4134         uint8_t test_result = DP_TEST_NAK;
4135         struct intel_connector *intel_connector = intel_dp->attached_connector;
4136         struct drm_connector *connector = &intel_connector->base;
4137
4138         if (intel_connector->detect_edid == NULL ||
4139             connector->edid_corrupt ||
4140             intel_dp->aux.i2c_defer_count > 6) {
4141                 /* Check EDID read for NACKs, DEFERs and corruption
4142                  * (DP CTS 1.2 Core r1.1)
4143                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4144                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4145                  *    4.2.2.6 : EDID corruption detected
4146                  * Use failsafe mode for all cases
4147                  */
4148                 if (intel_dp->aux.i2c_nack_count > 0 ||
4149                         intel_dp->aux.i2c_defer_count > 0)
4150                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4151                                       intel_dp->aux.i2c_nack_count,
4152                                       intel_dp->aux.i2c_defer_count);
4153                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4154         } else {
4155                 struct edid *block = intel_connector->detect_edid;
4156
4157                 /* We have to write the checksum
4158                  * of the last block read
4159                  */
4160                 block += intel_connector->detect_edid->extensions;
4161
4162                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4163                                         DP_TEST_EDID_CHECKSUM,
4164                                         &block->checksum,
4165                                         1))
4166                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4167
4168                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4169                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4170         }
4171
4172         /* Set test active flag here so userspace doesn't interrupt things */
4173         intel_dp->compliance_test_active = 1;
4174
4175         return test_result;
4176 }
4177
4178 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4179 {
4180         uint8_t test_result = DP_TEST_NAK;
4181         return test_result;
4182 }
4183
4184 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4185 {
4186         uint8_t response = DP_TEST_NAK;
4187         uint8_t rxdata = 0;
4188         int status = 0;
4189
4190         intel_dp->compliance_test_active = 0;
4191         intel_dp->compliance_test_type = 0;
4192         intel_dp->compliance_test_data = 0;
4193
4194         intel_dp->aux.i2c_nack_count = 0;
4195         intel_dp->aux.i2c_defer_count = 0;
4196
4197         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4198         if (status <= 0) {
4199                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4200                 goto update_status;
4201         }
4202
4203         switch (rxdata) {
4204         case DP_TEST_LINK_TRAINING:
4205                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4206                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4207                 response = intel_dp_autotest_link_training(intel_dp);
4208                 break;
4209         case DP_TEST_LINK_VIDEO_PATTERN:
4210                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4211                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4212                 response = intel_dp_autotest_video_pattern(intel_dp);
4213                 break;
4214         case DP_TEST_LINK_EDID_READ:
4215                 DRM_DEBUG_KMS("EDID test requested\n");
4216                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4217                 response = intel_dp_autotest_edid(intel_dp);
4218                 break;
4219         case DP_TEST_LINK_PHY_TEST_PATTERN:
4220                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4221                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4222                 response = intel_dp_autotest_phy_pattern(intel_dp);
4223                 break;
4224         default:
4225                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4226                 break;
4227         }
4228
4229 update_status:
4230         status = drm_dp_dpcd_write(&intel_dp->aux,
4231                                    DP_TEST_RESPONSE,
4232                                    &response, 1);
4233         if (status <= 0)
4234                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4235 }
4236
/*
 * Service an MST sink interrupt: retrain the link if the ESI shows
 * channel EQ was lost, pass the ESI to the MST topology manager, ack
 * the serviced IRQ bits back to the sink, and loop while further IRQs
 * are pending.
 *
 * Returns the topology manager's result, or -EINVAL when not in MST
 * mode / the ESI read failed (in which case MST mode is torn down and a
 * hotplug event is sent so the connector gets re-probed).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced IRQ bits; retry the
				 * 3-byte write a few times in case the
				 * AUX transfer comes up short. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* New IRQs may have arrived while we were
				 * servicing these; re-read and loop. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4293
4294 /*
4295  * According to DP spec
4296  * 5.1.2:
4297  *  1. Read DPCD
4298  *  2. Configure link according to Receiver Capabilities
4299  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4300  *  4. Check link status on receipt of hot-plug interrupt
4301  */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex. */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to check unless the encoder drives an active crtc. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Short-pulse test requests are only logged, not handled. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain when the sink reports channel EQ was lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4350
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status from the DPCD: direct sinks are
 * "connected" once the DPCD reads back; for branch devices we consult
 * SINK_COUNT (when HPD-aware), probe DDC, and finally fall back to
 * per-port-type heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* A zero sink count means nothing attached downstream. */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream type is available. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4400
4401 static enum drm_connector_status
4402 edp_detect(struct intel_dp *intel_dp)
4403 {
4404         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4405         enum drm_connector_status status;
4406
4407         status = intel_panel_detect(dev);
4408         if (status == connector_status_unknown)
4409                 status = connector_status_connected;
4410
4411         return status;
4412 }
4413
4414 static enum drm_connector_status
4415 ironlake_dp_detect(struct intel_dp *intel_dp)
4416 {
4417         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418         struct drm_i915_private *dev_priv = dev->dev_private;
4419         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4420
4421         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4422                 return connector_status_disconnected;
4423
4424         return intel_dp_detect_dpcd(intel_dp);
4425 }
4426
4427 static int g4x_digital_port_connected(struct drm_device *dev,
4428                                        struct intel_digital_port *intel_dig_port)
4429 {
4430         struct drm_i915_private *dev_priv = dev->dev_private;
4431         uint32_t bit;
4432
4433         if (IS_VALLEYVIEW(dev)) {
4434                 switch (intel_dig_port->port) {
4435                 case PORT_B:
4436                         bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4437                         break;
4438                 case PORT_C:
4439                         bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4440                         break;
4441                 case PORT_D:
4442                         bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4443                         break;
4444                 default:
4445                         return -EINVAL;
4446                 }
4447         } else {
4448                 switch (intel_dig_port->port) {
4449                 case PORT_B:
4450                         bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4451                         break;
4452                 case PORT_C:
4453                         bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4454                         break;
4455                 case PORT_D:
4456                         bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4457                         break;
4458                 default:
4459                         return -EINVAL;
4460                 }
4461         }
4462
4463         if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4464                 return 0;
4465         return 1;
4466 }
4467
4468 static enum drm_connector_status
4469 g4x_dp_detect(struct intel_dp *intel_dp)
4470 {
4471         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4472         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4473         int ret;
4474
4475         /* Can't disconnect eDP, but you can close the lid... */
4476         if (is_edp(intel_dp)) {
4477                 enum drm_connector_status status;
4478
4479                 status = intel_panel_detect(dev);
4480                 if (status == connector_status_unknown)
4481                         status = connector_status_connected;
4482                 return status;
4483         }
4484
4485         ret = g4x_digital_port_connected(dev, intel_dig_port);
4486         if (ret == -EINVAL)
4487                 return connector_status_unknown;
4488         else if (ret == 0)
4489                 return connector_status_disconnected;
4490
4491         return intel_dp_detect_dpcd(intel_dp);
4492 }
4493
4494 static struct edid *
4495 intel_dp_get_edid(struct intel_dp *intel_dp)
4496 {
4497         struct intel_connector *intel_connector = intel_dp->attached_connector;
4498
4499         /* use cached edid if we have one */
4500         if (intel_connector->edid) {
4501                 /* invalid edid */
4502                 if (IS_ERR(intel_connector->edid))
4503                         return NULL;
4504
4505                 return drm_edid_duplicate(intel_connector->edid);
4506         } else
4507                 return drm_get_edid(&intel_connector->base,
4508                                     &intel_dp->aux.ddc);
4509 }
4510
4511 static void
4512 intel_dp_set_edid(struct intel_dp *intel_dp)
4513 {
4514         struct intel_connector *intel_connector = intel_dp->attached_connector;
4515         struct edid *edid;
4516
4517         edid = intel_dp_get_edid(intel_dp);
4518         intel_connector->detect_edid = edid;
4519
4520         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4521                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4522         else
4523                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4524 }
4525
4526 static void
4527 intel_dp_unset_edid(struct intel_dp *intel_dp)
4528 {
4529         struct intel_connector *intel_connector = intel_dp->attached_connector;
4530
4531         kfree(intel_connector->detect_edid);
4532         intel_connector->detect_edid = NULL;
4533
4534         intel_dp->has_audio = false;
4535 }
4536
4537 static enum intel_display_power_domain
4538 intel_dp_power_get(struct intel_dp *dp)
4539 {
4540         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4541         enum intel_display_power_domain power_domain;
4542
4543         power_domain = intel_display_port_power_domain(encoder);
4544         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4545
4546         return power_domain;
4547 }
4548
4549 static void
4550 intel_dp_power_put(struct intel_dp *dp,
4551                    enum intel_display_power_domain power_domain)
4552 {
4553         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4554         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4555 }
4556
/*
 * Connector ->detect() hook: determine whether a sink is present,
 * refresh the cached EDID, probe MST, and service any pending sink
 * IRQs (including compliance test requests).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any EDID cached by a previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Keep the port powered for the duration of the probe. */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4628
/*
 * drm_connector_funcs .force hook: refresh the cached EDID for a connector
 * whose state userspace has forced, without performing live detection.
 *
 * The previously cached EDID is always dropped; a new one is only read
 * when the connector is currently marked connected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* The EDID read over AUX needs the port's power domain held. */
	power_domain = intel_dp_power_get(intel_dp);

	intel_dp_set_edid(intel_dp);

	intel_dp_power_put(intel_dp, power_domain);

	/* Re-mark non-eDP encoders as DP, mirroring intel_dp_detect(). */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4652
4653 static int intel_dp_get_modes(struct drm_connector *connector)
4654 {
4655         struct intel_connector *intel_connector = to_intel_connector(connector);
4656         struct edid *edid;
4657
4658         edid = intel_connector->detect_edid;
4659         if (edid) {
4660                 int ret = intel_connector_update_modes(connector, edid);
4661                 if (ret)
4662                         return ret;
4663         }
4664
4665         /* if eDP has no EDID, fall back to fixed mode */
4666         if (is_edp(intel_attached_dp(connector)) &&
4667             intel_connector->panel.fixed_mode) {
4668                 struct drm_display_mode *mode;
4669
4670                 mode = drm_mode_duplicate(connector->dev,
4671                                           intel_connector->panel.fixed_mode);
4672                 if (mode) {
4673                         drm_mode_probed_add(connector, mode);
4674                         return 1;
4675                 }
4676         }
4677
4678         return 0;
4679 }
4680
4681 static bool
4682 intel_dp_detect_audio(struct drm_connector *connector)
4683 {
4684         bool has_audio = false;
4685         struct edid *edid;
4686
4687         edid = to_intel_connector(connector)->detect_edid;
4688         if (edid)
4689                 has_audio = drm_detect_monitor_audio(edid);
4690
4691         return has_audio;
4692 }
4693
/*
 * drm_connector_funcs .set_property hook for DP/eDP connectors.
 *
 * Handles the force_audio, Broadcast RGB and (eDP only) scaling mode
 * properties; returns -EINVAL for anything else. If the new value
 * actually changes state and a crtc is bound, a modeset restore is
 * forced via intel_crtc_restore_mode() so the change takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the value on the drm object first; bail if that fails. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* "auto" means: follow whatever the EDID advertises. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset when nothing effectively changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4781
/*
 * drm_connector_funcs .destroy hook: release everything hanging off the
 * connector, then the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* kfree() on NULL is a no-op, so no check needed here. */
	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR (not just NULL), hence the explicit test. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4800
/*
 * drm_encoder_funcs .destroy hook: unregister the AUX channel, clean up
 * MST state and (for eDP) flush pending VDD work and the reboot notifier
 * before freeing the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4826
/*
 * Encoder suspend hook: for eDP, make sure any delayed VDD-off work has
 * run and VDD is really off before the system suspends. Non-eDP ports
 * have nothing to do.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4843
/*
 * If the BIOS left panel VDD enabled, adopt it into our state tracking:
 * grab the matching power domain reference and schedule the normal
 * delayed VDD off so the reference isn't held forever.
 *
 * Caller must hold pps_mutex (asserted below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4868
/*
 * drm_encoder_funcs .reset hook (driver load / resume). Only eDP needs
 * work here: under pps_lock, re-read the power sequencer assignment on
 * VLV and sanitize any VDD state the BIOS left behind.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4891
/* Connector vfuncs for DP/eDP connectors (atomic helpers for state). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4903
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4909
/* Encoder vfuncs for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4914
/*
 * Handle an HPD pulse (long or short) on a DP digital port.
 *
 * Long pulses re-check the physical connection, re-read the DPCD, and
 * either hand the port over to MST or check/retrain the SST link.
 * Short pulses service MST events or check SST link status.
 *
 * Returns IRQ_HANDLED when the pulse was fully consumed, IRQ_NONE
 * otherwise (including the SST-fallback path below, which exits through
 * mst_fail with ret still IRQ_NONE).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* AUX/DPCD access below needs the port's power domain. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* Bail out if the port is no longer physically connected. */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Sink isn't MST-capable: fall back to an SST link check. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/*
	 * NOTE(review): despite the name, this label is also reached from
	 * non-MST failure paths above (port disconnected, DPCD read failed,
	 * SST fallback); in those cases is_mst is false and only the power
	 * reference is dropped.
	 */
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4999
5000 /* Return which DP Port should be selected for Transcoder DP control */
5001 int
5002 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5003 {
5004         struct drm_device *dev = crtc->dev;
5005         struct intel_encoder *intel_encoder;
5006         struct intel_dp *intel_dp;
5007
5008         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5009                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5010
5011                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5012                     intel_encoder->type == INTEL_OUTPUT_EDP)
5013                         return intel_dp->output_reg;
5014         }
5015
5016         return -1;
5017 }
5018
5019 /* check the VBT to see whether the eDP is on another port */
5020 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5021 {
5022         struct drm_i915_private *dev_priv = dev->dev_private;
5023         union child_device_config *p_child;
5024         int i;
5025         static const short port_mapping[] = {
5026                 [PORT_B] = DVO_PORT_DPB,
5027                 [PORT_C] = DVO_PORT_DPC,
5028                 [PORT_D] = DVO_PORT_DPD,
5029                 [PORT_E] = DVO_PORT_DPE,
5030         };
5031
5032         if (port == PORT_A)
5033                 return true;
5034
5035         if (!dev_priv->vbt.child_dev_num)
5036                 return false;
5037
5038         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5039                 p_child = dev_priv->vbt.child_dev + i;
5040
5041                 if (p_child->common.dvo_port == port_mapping[port] &&
5042                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5043                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5044                         return true;
5045         }
5046         return false;
5047 }
5048
/*
 * Attach the standard DP connector properties: force_audio and
 * Broadcast RGB for all ports, plus the scaling mode property (defaulted
 * to aspect-preserving) for eDP panels.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5067
/*
 * Record "now" as the time of the last power cycle, power on and
 * backlight off events, so the PPS delay bookkeeping starts from a
 * known baseline.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5074
/*
 * Read back the current panel power sequencer delays from hardware,
 * take the max against the VBT values (falling back to eDP 1.3 spec
 * limits when both are zero), and store the result in
 * intel_dp->pps_delays plus the derived software delay fields.
 *
 * Runs at most once per intel_dp (no-op once t11_t12 is non-zero).
 * Caller must hold pps_mutex (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power cycle delay in PP_CONTROL, in 100ms units,
	 * 1-based; everyone else keeps it in PP_DIVISOR. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us register units to ms for the sw delays. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5197
/*
 * Program the panel power sequencer registers from the delays stored in
 * intel_dp->pps_delays, plus the port-select bits on platforms that have
 * them. Caller must hold pps_mutex (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: power cycle delay lives in PP_CONTROL, 100ms units. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5285
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate selects the low-RR state. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switch RR via the M/N values; older gens
	 * (and VLV/CHV) toggle a PIPECONF EDP RR mode-switch bit. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5389
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* NOTE(review): intel_crtc is dereferenced without a NULL check;
	 * presumably callers only enable DRRS with a crtc attached — confirm. */
	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5422
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Switches back to the fixed mode refresh rate if we are currently in
 * low RR, detaches @intel_dp from the DRRS state and cancels any
 * pending idleness (downclock) work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Restore the fixed mode refresh rate before detaching. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Cancel the work only after dropping the mutex: the work item
	 * itself takes drrs.mutex, so a sync cancel under the lock would
	 * deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5455
5456 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5457 {
5458         struct drm_i915_private *dev_priv =
5459                 container_of(work, typeof(*dev_priv), drrs.work.work);
5460         struct intel_dp *intel_dp;
5461
5462         mutex_lock(&dev_priv->drrs.mutex);
5463
5464         intel_dp = dev_priv->drrs.dp;
5465
5466         if (!intel_dp)
5467                 goto unlock;
5468
5469         /*
5470          * The delayed work can race with an invalidate hence we need to
5471          * recheck.
5472          */
5473
5474         if (dev_priv->drrs.busy_frontbuffer_bits)
5475                 goto unlock;
5476
5477         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5478                 intel_dp_set_drrs_state(dev_priv->dev,
5479                         intel_dp->attached_connector->panel.
5480                         downclock_mode->vrefresh);
5481
5482 unlock:
5483         mutex_unlock(&dev_priv->drrs.mutex);
5484 }
5485
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Kill any pending downclock work; the screen is busy again. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits on the DRRS pipe are of interest. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5528
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Kill pending downclock work before re-evaluating idleness. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits on the DRRS pipe are of interest. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means the frontbuffer was just updated, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5580
5581 /**
5582  * DOC: Display Refresh Rate Switching (DRRS)
5583  *
5584  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5586  * dynamically, based on the usage scenario. This feature is applicable
5587  * for internal panels.
5588  *
5589  * Indication that the panel supports DRRS is given by the panel EDID, which
5590  * would list multiple refresh rates for one resolution.
5591  *
5592  * DRRS is of 2 types - static and seamless.
5593  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5594  * (may appear as a blink on screen) and is used in dock-undock scenario.
5595  * Seamless DRRS involves changing RR without any visual effect to the user
5596  * and can be used during normal system usage. This is done by programming
5597  * certain registers.
5598  *
5599  * Support for static/seamless DRRS may be indicated in the VBT based on
5600  * inputs from the panel spec.
5601  *
5602  * DRRS saves power by switching to low RR based on usage scenarios.
5603  *
5604  * eDP DRRS:-
5605  *        The implementation is based on frontbuffer tracking implementation.
5606  * When there is a disturbance on the screen triggered by user activity or a
5607  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5608  * When there is no movement on screen, after a timeout of 1 second, a switch
5609  * to low RR is made.
5610  *        For integration with frontbuffer tracking code,
5611  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5612  *
5613  * DRRS can be further extended to support other internal panels and also
5614  * the scenario of video playback wherein RR is set based on the rate
5615  * requested by userspace.
5616  */
5617
5618 /**
5619  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5620  * @intel_connector: eDP connector
5621  * @fixed_mode: preferred mode of panel
5622  *
5623  * This function is  called only once at driver load to initialize basic
5624  * DRRS stuff.
5625  *
5626  * Returns:
5627  * Downclock mode if panel supports it, else return NULL.
5628  * DRRS support is determined by the presence of downclock mode (apart
5629  * from VBT setting).
5630  */
5631 static struct drm_display_mode *
5632 intel_dp_drrs_init(struct intel_connector *intel_connector,
5633                 struct drm_display_mode *fixed_mode)
5634 {
5635         struct drm_connector *connector = &intel_connector->base;
5636         struct drm_device *dev = connector->dev;
5637         struct drm_i915_private *dev_priv = dev->dev_private;
5638         struct drm_display_mode *downclock_mode = NULL;
5639
5640         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5641         mutex_init(&dev_priv->drrs.mutex);
5642
5643         if (INTEL_INFO(dev)->gen <= 6) {
5644                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5645                 return NULL;
5646         }
5647
5648         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5649                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5650                 return NULL;
5651         }
5652
5653         downclock_mode = intel_find_panel_downclock
5654                                         (dev, fixed_mode, connector);
5655
5656         if (!downclock_mode) {
5657                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5658                 return NULL;
5659         }
5660
5661         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5662
5663         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5664         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5665         return downclock_mode;
5666 }
5667
/*
 * Finish connector setup for eDP panels: cache the DPCD and EDID,
 * program the panel power sequencer registers, pick the fixed (and
 * optional downclock) panel mode, and set up the backlight.
 *
 * Returns false when the DPCD read fails, i.e. the "panel" is presumed
 * to be a ghost. For non-eDP connectors this is a no-op returning true.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID was read but yielded no modes: cache an error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a real EDID or an ERR_PTR; consumers must check. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS support depends on finding a downclock mode. */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5774
/*
 * Initialize the DP/eDP connector for @intel_dig_port: set up the AUX
 * vfuncs, DRM connector, hotplug pin, panel power sequencer (eDP), MST
 * (where supported) and finally the eDP panel itself. Returns false and
 * tears everything down again if the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* eDP panel power sequencing must be set up under the pps lock. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP ghost panel: undo everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
5921
5922 void
5923 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5924 {
5925         struct drm_i915_private *dev_priv = dev->dev_private;
5926         struct intel_digital_port *intel_dig_port;
5927         struct intel_encoder *intel_encoder;
5928         struct drm_encoder *encoder;
5929         struct intel_connector *intel_connector;
5930
5931         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5932         if (!intel_dig_port)
5933                 return;
5934
5935         intel_connector = intel_connector_alloc();
5936         if (!intel_connector) {
5937                 kfree(intel_dig_port);
5938                 return;
5939         }
5940
5941         intel_encoder = &intel_dig_port->base;
5942         encoder = &intel_encoder->base;
5943
5944         drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5945                          DRM_MODE_ENCODER_TMDS);
5946
5947         intel_encoder->compute_config = intel_dp_compute_config;
5948         intel_encoder->disable = intel_disable_dp;
5949         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5950         intel_encoder->get_config = intel_dp_get_config;
5951         intel_encoder->suspend = intel_dp_encoder_suspend;
5952         if (IS_CHERRYVIEW(dev)) {
5953                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5954                 intel_encoder->pre_enable = chv_pre_enable_dp;
5955                 intel_encoder->enable = vlv_enable_dp;
5956                 intel_encoder->post_disable = chv_post_disable_dp;
5957         } else if (IS_VALLEYVIEW(dev)) {
5958                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5959                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5960                 intel_encoder->enable = vlv_enable_dp;
5961                 intel_encoder->post_disable = vlv_post_disable_dp;
5962         } else {
5963                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5964                 intel_encoder->enable = g4x_enable_dp;
5965                 if (INTEL_INFO(dev)->gen >= 5)
5966                         intel_encoder->post_disable = ilk_post_disable_dp;
5967         }
5968
5969         intel_dig_port->port = port;
5970         intel_dig_port->dp.output_reg = output_reg;
5971
5972         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5973         if (IS_CHERRYVIEW(dev)) {
5974                 if (port == PORT_D)
5975                         intel_encoder->crtc_mask = 1 << 2;
5976                 else
5977                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5978         } else {
5979                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5980         }
5981         intel_encoder->cloneable = 0;
5982
5983         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5984         dev_priv->hotplug.irq_port[port] = intel_dig_port;
5985
5986         if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5987                 drm_encoder_cleanup(encoder);
5988                 kfree(intel_dig_port);
5989                 kfree(intel_connector);
5990         }
5991 }
5992
5993 void intel_dp_mst_suspend(struct drm_device *dev)
5994 {
5995         struct drm_i915_private *dev_priv = dev->dev_private;
5996         int i;
5997
5998         /* disable MST */
5999         for (i = 0; i < I915_MAX_PORTS; i++) {
6000                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6001                 if (!intel_dig_port)
6002                         continue;
6003
6004                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6005                         if (!intel_dig_port->dp.can_mst)
6006                                 continue;
6007                         if (intel_dig_port->dp.is_mst)
6008                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6009                 }
6010         }
6011 }
6012
6013 void intel_dp_mst_resume(struct drm_device *dev)
6014 {
6015         struct drm_i915_private *dev_priv = dev->dev_private;
6016         int i;
6017
6018         for (i = 0; i < I915_MAX_PORTS; i++) {
6019                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6020                 if (!intel_dig_port)
6021                         continue;
6022                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6023                         int ret;
6024
6025                         if (!intel_dig_port->dp.can_mst)
6026                                 continue;
6027
6028                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6029                         if (ret != 0) {
6030                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6031                         }
6032                 }
6033         }
6034 }