drm/i915/dp: abstract rate array length limiting
[linux-block.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/types.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <asm/byteorder.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_edid.h>
40 #include "intel_drv.h"
41 #include <drm/i915_drm.h>
42 #include "i915_drv.h"
43
44 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
45 #define DP_DPRX_ESI_LEN 14
46
47 /* Compliance test status bits  */
48 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
49 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
52
53 struct dp_link_dpll {
54         int clock;
55         struct dpll dpll;
56 };
57
58 static const struct dp_link_dpll gen4_dpll[] = {
59         { 162000,
60                 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
61         { 270000,
62                 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
63 };
64
65 static const struct dp_link_dpll pch_dpll[] = {
66         { 162000,
67                 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
68         { 270000,
69                 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
70 };
71
72 static const struct dp_link_dpll vlv_dpll[] = {
73         { 162000,
74                 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
75         { 270000,
76                 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 };
78
79 /*
80  * CHV supports eDP 1.4 that have  more link rates.
81  * Below only provides the fixed rate but exclude variable rate.
82  */
83 static const struct dp_link_dpll chv_dpll[] = {
84         /*
85          * CHV requires to program fractional division for m2.
86          * m2 is stored in fixed point format using formula below
87          * (m2_int << 22) | m2_fraction
88          */
89         { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
90                 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
91         { 270000,       /* m2_int = 27, m2_fraction = 0 */
92                 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
93         { 540000,       /* m2_int = 27, m2_fraction = 0 */
94                 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
95 };
96
97 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
98                                   324000, 432000, 540000 };
99 static const int skl_rates[] = { 162000, 216000, 270000,
100                                   324000, 432000, 540000 };
101 static const int cnl_rates[] = { 162000, 216000, 270000,
102                                  324000, 432000, 540000,
103                                  648000, 810000 };
104 static const int default_rates[] = { 162000, 270000, 540000 };
105
106 /**
107  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
108  * @intel_dp: DP struct
109  *
110  * If a CPU or PCH DP output is attached to an eDP panel, this function
111  * will return true, and false otherwise.
112  */
113 bool intel_dp_is_edp(struct intel_dp *intel_dp)
114 {
115         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116
117         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
118 }
119
120 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
121 {
122         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
123
124         return intel_dig_port->base.base.dev;
125 }
126
/* Return the intel_dp of the encoder currently attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
131
132 static void intel_dp_link_down(struct intel_encoder *encoder,
133                                const struct intel_crtc_state *old_crtc_state);
134 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
135 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
136 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
137                                            const struct intel_crtc_state *crtc_state);
138 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
139                                       enum pipe pipe);
140 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
141
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	int i, max_rate;

	/* The sink advertises only a max link BW code; expand to a rate list. */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	/*
	 * default_rates is sorted ascending, so copy entries until the first
	 * one above the sink's max; the loop index then equals the count.
	 */
	for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
		if (default_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = default_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
157
/*
 * Get length of a sorted (ascending) rates array potentially limited by
 * max_rate: the number of leading entries that are <= max_rate.
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int idx;

	/* Walk down from the top; the first entry within limit fixes the length. */
	for (idx = len - 1; idx >= 0; idx--) {
		if (rates[idx] <= max_rate)
			return idx + 1;
	}

	return 0;
}
171
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	/* common_rates is the sorted source∩sink intersection (see intersect_rates()). */
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
179
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates is sorted ascending, so the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
185
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;	/* platform/port limit */
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);	/* sink DPCD limit */

	return min(source_max, sink_max);
}
195
/*
 * Current usable lane count: starts at the common max but may have been
 * reduced by link-training fallback (see
 * intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
200
/*
 * Bandwidth (in kHz-equivalent units, rounded up) required to drive
 * @pixel_clock at @bpp bits per pixel; comparable against
 * intel_dp_max_data_rate().
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
207
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
	 * link rate usually quoted in Gbps. Each lane carries 8 data bits
	 * per LS_Clk, so the channel-encoding overhead handled in the PHY
	 * does not enter into this calculation.
	 */
	int total = max_link_clock * max_lanes;

	return total;
}
219
/*
 * Max dotclock for this connector, additionally clamped by a DP-to-VGA
 * downstream facing port's pixel clock limit when one is present.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	/* Only VGA converters are clamped here; other DFP types pass through. */
	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means the branch device didn't report a limit. */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
242
/*
 * Clamp the cnl_rates[] length for SKUs/ports that can't do 8.1G:
 * returning @size - 2 drops the trailing 648000 and 810000 entries,
 * capping the max rate at 5.4G.
 */
static int cnl_adjusted_max_rate(struct intel_dp *intel_dp, int size)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return size - 2;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return size;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return size - 2;

	return size;
}
265
/* Pick the source-side link rate table for this platform. Run once per port. */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_CANNONLAKE(dev_priv)) {
		source_rates = cnl_rates;
		/* CNL may drop the 8.1G entries depending on SKU/port. */
		size = cnl_adjusted_max_rate(intel_dp, ARRAY_SIZE(cnl_rates));
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	} else {
		source_rates = default_rates;
		/* Older platforms/ULX: drop 540000, capping at 2.7G. */
		size = ARRAY_SIZE(default_rates) - 1;
	}

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
298
299 static int intersect_rates(const int *source_rates, int source_len,
300                            const int *sink_rates, int sink_len,
301                            int *common_rates)
302 {
303         int i = 0, j = 0, k = 0;
304
305         while (i < source_len && j < sink_len) {
306                 if (source_rates[i] == sink_rates[j]) {
307                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
308                                 return k;
309                         common_rates[k] = source_rates[i];
310                         ++k;
311                         ++i;
312                         ++j;
313                 } else if (source_rates[i] < sink_rates[j]) {
314                         ++i;
315                 } else {
316                         ++j;
317                 }
318         }
319         return k;
320 }
321
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx;

	for (idx = 0; idx < len; idx++) {
		if (rates[idx] == rate)
			return idx;
	}

	return -1;
}
333
/* Recompute the source∩sink rate intersection into intel_dp->common_rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	/* Both tables must have been populated first. */
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		/* Fall back to the lowest mandatory rate (162000). */
		intel_dp->common_rates[0] = default_rates[0];
		intel_dp->num_common_rates = 1;
	}
}
350
/*
 * Validate a (link_rate, lane_count) pair against the current per-port
 * maxima; used to decide whether fast link training parameters are usable.
 */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       uint8_t lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
369
/*
 * Compute fallback link parameters after a failed link training:
 * first step down to the next lower common rate; once at the lowest rate,
 * halve the lane count instead. Returns 0 on success, -1 when no further
 * fallback exists (1 lane at the lowest rate already failed).
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Try the next lower rate at the same lane count. */
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate reached: restart from max rate with fewer lanes. */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
391
/*
 * Connector ->mode_valid hook: reject modes that exceed panel dimensions
 * (eDP), link bandwidth, or the dotclock limits of the pipeline.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		/* eDP panels can't exceed their native timings. */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* Bandwidth is judged against the panel's fixed clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp (6 bpc) is the minimum any sink must support. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
432
/*
 * Pack up to the first four bytes of @src into a big-endian u32 suitable
 * for the AUX data registers; extra bytes are ignored, missing bytes
 * read as zero.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t value = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		value |= (uint32_t)src[i] << (24 - i * 8);

	return value;
}
444
/* Unpack a big-endian AUX register value into up to four bytes of @dst. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
453
454 static void
455 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
456 static void
457 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
458                                               bool force_disable_vdd);
459 static void
460 intel_dp_pps_init(struct intel_dp *intel_dp);
461
/*
 * Acquire pps_mutex with the required power domain held. The power
 * reference MUST be taken before the mutex (and dropped after release in
 * pps_unlock()) to avoid a lock inversion with the power_domain mutex;
 * see intel_power_sequencer_reset().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
474
/* Release pps_mutex, then drop the power reference taken in pps_lock(). */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	mutex_unlock(&dev_priv->pps_mutex);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
483
484 static void
485 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
486 {
487         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
488         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
489         enum pipe pipe = intel_dp->pps_pipe;
490         bool pll_enabled, release_cl_override = false;
491         enum dpio_phy phy = DPIO_PHY(pipe);
492         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
493         uint32_t DP;
494
495         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
496                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
497                  pipe_name(pipe), port_name(intel_dig_port->base.port)))
498                 return;
499
500         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
501                       pipe_name(pipe), port_name(intel_dig_port->base.port));
502
503         /* Preserve the BIOS-computed detected bit. This is
504          * supposed to be read-only.
505          */
506         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
507         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
508         DP |= DP_PORT_WIDTH(1);
509         DP |= DP_LINK_TRAIN_PAT_1;
510
511         if (IS_CHERRYVIEW(dev_priv))
512                 DP |= DP_PIPE_SELECT_CHV(pipe);
513         else if (pipe == PIPE_B)
514                 DP |= DP_PIPEB_SELECT;
515
516         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
517
518         /*
519          * The DPLL for the pipe must be enabled for this to work.
520          * So enable temporarily it if it's not already enabled.
521          */
522         if (!pll_enabled) {
523                 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
524                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
525
526                 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
527                                      &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
528                         DRM_ERROR("Failed to force on pll for pipe %c!\n",
529                                   pipe_name(pipe));
530                         return;
531                 }
532         }
533
534         /*
535          * Similar magic as in intel_dp_enable_port().
536          * We _must_ do this port enable + disable trick
537          * to make this power seqeuencer lock onto the port.
538          * Otherwise even VDD force bit won't work.
539          */
540         I915_WRITE(intel_dp->output_reg, DP);
541         POSTING_READ(intel_dp->output_reg);
542
543         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
544         POSTING_READ(intel_dp->output_reg);
545
546         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
547         POSTING_READ(intel_dp->output_reg);
548
549         if (!pll_enabled) {
550                 vlv_force_pll_off(dev_priv, pipe);
551
552                 if (release_cl_override)
553                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
554         }
555 }
556
/*
 * Find a power sequencer (pipe A or B) not claimed by any other DP/eDP
 * port, or INVALID_PIPE if both are taken.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	/* Bitmask of candidate PPS instances; VLV/CHV have two (pipes A and B). */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			/* eDP ports own the PPS tracked in pps_pipe. */
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			/* Regular DP never holds a PPS, but its active pipe is busy. */
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	/* Lowest-numbered free pipe. */
	return ffs(pipes) - 1;
}
594
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * (and kicking) a free one on first use. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Fast path: already assigned. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
641
/*
 * Return the PPS instance index for this eDP port on BXT/GLK (always 0
 * for now), reprogramming the PPS registers after a reset if needed.
 * Caller must hold pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	/* pps_reset was set by intel_power_sequencer_reset(); clear and redo HW. */
	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return 0;
}
670
671 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
672                                enum pipe pipe);
673
/* vlv_pipe_check: is the panel currently powered on via this pipe's PPS? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}
679
/* vlv_pipe_check: is VDD force enabled on this pipe's PPS? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
685
/* vlv_pipe_check: accept any pipe (last-resort fallback). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
691
/*
 * Scan pipes A and B for a PPS whose port-select field matches @port and
 * that satisfies @pipe_check; INVALID_PIPE if none does.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		/* Which port did the BIOS/previous owner select on this PPS? */
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
714
/*
 * At init, adopt the power sequencer the BIOS left associated with this
 * eDP port, preferring one that is actively powering the panel. Caller
 * must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
750
/*
 * Invalidate the PPS state of every eDP port after the power well was
 * lost (VLV/CHV: forget pps_pipe; BXT/GLK: mark registers for
 * reprogramming). Deliberately called without pps_mutex — see below.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP &&
		    encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		/* Skip pure DVI/HDMI DDI encoders */
		if (!i915_mmio_reg_valid(intel_dp->output_reg))
			continue;

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		/* Only eDP carries PPS state to reset. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
794
/* Resolved panel power sequencer register offsets for one PPS instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor/cycle delay; absent on some platforms */
};
802
/*
 * Fill @regs with the PPS register offsets for this port's sequencer
 * instance (index 0 except on VLV/CHV/BXT, where it is looked up).
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	/* BXT/GLK and CNP/ICP PCHs have no divisor register; leave it zeroed. */
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
824
/* Convenience wrapper: PP_CONTROL register for this port's PPS instance. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}
834
835 static i915_reg_t
836 _pp_stat_reg(struct intel_dp *intel_dp)
837 {
838         struct pps_registers regs;
839
840         intel_pps_get_registers(intel_dp, &regs);
841
842         return regs.pp_stat;
843 }
844
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/* Only act on eDP panels, and only for a restart (not halt/poweroff) */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	/* Only VLV/CHV need the explicit power-off sequence here */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		/* Preserve the reference divider, replace the cycle delay */
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Block the reboot long enough to satisfy the T12 delay */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
879
880 static bool edp_have_panel_power(struct intel_dp *intel_dp)
881 {
882         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
883
884         lockdep_assert_held(&dev_priv->pps_mutex);
885
886         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
887             intel_dp->pps_pipe == INVALID_PIPE)
888                 return false;
889
890         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
891 }
892
893 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
894 {
895         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
896
897         lockdep_assert_held(&dev_priv->pps_mutex);
898
899         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
900             intel_dp->pps_pipe == INVALID_PIPE)
901                 return false;
902
903         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
904 }
905
/*
 * Sanity check before AUX communication on eDP: warn loudly if neither
 * panel power nor VDD is enabled, since the AUX transfer would then be
 * talking to a powered-off panel.  No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
		return;

	WARN(1, "eDP powered off while attempting aux channel communication.\n");
	DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
		      I915_READ(_pp_stat_reg(intel_dp)),
		      I915_READ(_pp_ctrl_reg(intel_dp)));
}
921
/*
 * Wait for an in-flight AUX transfer to complete, i.e. for the
 * SEND_BUSY bit in the channel control register to deassert.
 *
 * With AUX irqs available, sleep on gmbus_wait_queue (presumably woken
 * by the AUX done interrupt — shared with GMBUS); otherwise poll the
 * register.  Both paths give up after 10ms.
 *
 * Returns the last value read from the channel control register so the
 * caller can inspect the done/error bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

	/* C == "transfer finished": SEND_BUSY has deasserted */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
943
944 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
945 {
946         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
947         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
948
949         if (index)
950                 return 0;
951
952         /*
953          * The clock divider is based off the hrawclk, and would like to run at
954          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
955          */
956         return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
957 }
958
959 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
960 {
961         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
962         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
963
964         if (index)
965                 return 0;
966
967         /*
968          * The clock divider is based off the cdclk or PCH rawclk, and would
969          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
970          * divide by 2000 and use that
971          */
972         if (intel_dig_port->base.port == PORT_A)
973                 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
974         else
975                 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
976 }
977
978 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
979 {
980         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
981         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
982
983         if (intel_dig_port->base.port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
984                 /* Workaround for non-ULT HSW */
985                 switch (index) {
986                 case 0: return 63;
987                 case 1: return 72;
988                 default: return 0;
989                 }
990         }
991
992         return ilk_get_aux_clock_divider(intel_dp, index);
993 }
994
995 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
996 {
997         /*
998          * SKL doesn't need us to program the AUX clock divider (Hardware will
999          * derive the clock from CDCLK automatically). We still implement the
1000          * get_aux_clock_divider vfunc to plug-in into the existing code.
1001          */
1002         return index ? 0 : 1;
1003 }
1004
1005 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1006                                      bool has_aux_irq,
1007                                      int send_bytes,
1008                                      uint32_t aux_clock_divider)
1009 {
1010         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1011         struct drm_i915_private *dev_priv =
1012                         to_i915(intel_dig_port->base.base.dev);
1013         uint32_t precharge, timeout;
1014
1015         if (IS_GEN6(dev_priv))
1016                 precharge = 3;
1017         else
1018                 precharge = 5;
1019
1020         if (IS_BROADWELL(dev_priv))
1021                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1022         else
1023                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1024
1025         return DP_AUX_CH_CTL_SEND_BUSY |
1026                DP_AUX_CH_CTL_DONE |
1027                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1028                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1029                timeout |
1030                DP_AUX_CH_CTL_RECEIVE_ERROR |
1031                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1032                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1033                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1034 }
1035
1036 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1037                                       bool has_aux_irq,
1038                                       int send_bytes,
1039                                       uint32_t unused)
1040 {
1041         return DP_AUX_CH_CTL_SEND_BUSY |
1042                DP_AUX_CH_CTL_DONE |
1043                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1044                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1045                DP_AUX_CH_CTL_TIME_OUT_MAX |
1046                DP_AUX_CH_CTL_RECEIVE_ERROR |
1047                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1048                DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1049                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1050 }
1051
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send
 * into the AUX data registers, kick off the transfer, wait for
 * completion and unload up to @recv_size reply bytes into @recv.
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY if the channel never went idle / never signalled done (or the
 * hardware reported a forbidden message size), -EIO on receive error,
 * -ETIMEDOUT when the sink did not respond, -E2BIG for oversized
 * requests.  Retries each clock divider up to 5 times per the DP spec.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Channel stuck busy: warn only on a status change so a
		 * wedged channel doesn't spam the log on every transfer */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's clock dividers (see the
	 * get_aux_clock_divider vfuncs); a zero divider ends the list */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the latency constraint and VDD/lock taken above */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1223
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: marshal a drm_dp_aux_msg into the raw AUX
 * wire format (4-byte header + payload) and run it through
 * intel_dp_aux_ch().  Returns the payload size on success or a negative
 * errno; the reply code is stored in msg->reply for the caller to check.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Header: request type + 20-bit address + (length - 1) */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes send only the 3-byte bare address */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both unset */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;	/* +1 for the reply status byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1298
1299 static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1300                                 enum port port)
1301 {
1302         const struct ddi_vbt_port_info *info =
1303                 &dev_priv->vbt.ddi_port_info[port];
1304         enum port aux_port;
1305
1306         if (!info->alternate_aux_channel) {
1307                 DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1308                               port_name(port), port_name(port));
1309                 return port;
1310         }
1311
1312         switch (info->alternate_aux_channel) {
1313         case DP_AUX_A:
1314                 aux_port = PORT_A;
1315                 break;
1316         case DP_AUX_B:
1317                 aux_port = PORT_B;
1318                 break;
1319         case DP_AUX_C:
1320                 aux_port = PORT_C;
1321                 break;
1322         case DP_AUX_D:
1323                 aux_port = PORT_D;
1324                 break;
1325         case DP_AUX_F:
1326                 aux_port = PORT_F;
1327                 break;
1328         default:
1329                 MISSING_CASE(info->alternate_aux_channel);
1330                 aux_port = PORT_A;
1331                 break;
1332         }
1333
1334         DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1335                       port_name(aux_port), port_name(port));
1336
1337         return aux_port;
1338 }
1339
1340 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1341                                   enum port port)
1342 {
1343         switch (port) {
1344         case PORT_B:
1345         case PORT_C:
1346         case PORT_D:
1347                 return DP_AUX_CH_CTL(port);
1348         default:
1349                 MISSING_CASE(port);
1350                 return DP_AUX_CH_CTL(PORT_B);
1351         }
1352 }
1353
1354 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1355                                    enum port port, int index)
1356 {
1357         switch (port) {
1358         case PORT_B:
1359         case PORT_C:
1360         case PORT_D:
1361                 return DP_AUX_CH_DATA(port, index);
1362         default:
1363                 MISSING_CASE(port);
1364                 return DP_AUX_CH_DATA(PORT_B, index);
1365         }
1366 }
1367
1368 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1369                                   enum port port)
1370 {
1371         switch (port) {
1372         case PORT_A:
1373                 return DP_AUX_CH_CTL(port);
1374         case PORT_B:
1375         case PORT_C:
1376         case PORT_D:
1377                 return PCH_DP_AUX_CH_CTL(port);
1378         default:
1379                 MISSING_CASE(port);
1380                 return DP_AUX_CH_CTL(PORT_A);
1381         }
1382 }
1383
1384 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1385                                    enum port port, int index)
1386 {
1387         switch (port) {
1388         case PORT_A:
1389                 return DP_AUX_CH_DATA(port, index);
1390         case PORT_B:
1391         case PORT_C:
1392         case PORT_D:
1393                 return PCH_DP_AUX_CH_DATA(port, index);
1394         default:
1395                 MISSING_CASE(port);
1396                 return DP_AUX_CH_DATA(PORT_A, index);
1397         }
1398 }
1399
1400 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1401                                   enum port port)
1402 {
1403         switch (port) {
1404         case PORT_A:
1405         case PORT_B:
1406         case PORT_C:
1407         case PORT_D:
1408         case PORT_F:
1409                 return DP_AUX_CH_CTL(port);
1410         default:
1411                 MISSING_CASE(port);
1412                 return DP_AUX_CH_CTL(PORT_A);
1413         }
1414 }
1415
1416 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1417                                    enum port port, int index)
1418 {
1419         switch (port) {
1420         case PORT_A:
1421         case PORT_B:
1422         case PORT_C:
1423         case PORT_D:
1424         case PORT_F:
1425                 return DP_AUX_CH_DATA(port, index);
1426         default:
1427                 MISSING_CASE(port);
1428                 return DP_AUX_CH_DATA(PORT_A, index);
1429         }
1430 }
1431
1432 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1433                                     enum port port)
1434 {
1435         if (INTEL_INFO(dev_priv)->gen >= 9)
1436                 return skl_aux_ctl_reg(dev_priv, port);
1437         else if (HAS_PCH_SPLIT(dev_priv))
1438                 return ilk_aux_ctl_reg(dev_priv, port);
1439         else
1440                 return g4x_aux_ctl_reg(dev_priv, port);
1441 }
1442
1443 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1444                                      enum port port, int index)
1445 {
1446         if (INTEL_INFO(dev_priv)->gen >= 9)
1447                 return skl_aux_data_reg(dev_priv, port, index);
1448         else if (HAS_PCH_SPLIT(dev_priv))
1449                 return ilk_aux_data_reg(dev_priv, port, index);
1450         else
1451                 return g4x_aux_data_reg(dev_priv, port, index);
1452 }
1453
1454 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1455 {
1456         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1457         enum port port = intel_aux_port(dev_priv,
1458                                         dp_to_dig_port(intel_dp)->base.port);
1459         int i;
1460
1461         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1462         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1463                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1464 }
1465
/* Release resources allocated by intel_dp_aux_init() (the aux name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1471
1472 static void
1473 intel_dp_aux_init(struct intel_dp *intel_dp)
1474 {
1475         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1476         enum port port = intel_dig_port->base.port;
1477
1478         intel_aux_reg_init(intel_dp);
1479         drm_dp_aux_init(&intel_dp->aux);
1480
1481         /* Failure to allocate our preferred name is not critical */
1482         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1483         intel_dp->aux.transfer = intel_dp_aux_transfer;
1484 }
1485
1486 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1487 {
1488         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1489
1490         return max_rate >= 540000;
1491 }
1492
1493 static void
1494 intel_dp_set_clock(struct intel_encoder *encoder,
1495                    struct intel_crtc_state *pipe_config)
1496 {
1497         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1498         const struct dp_link_dpll *divisor = NULL;
1499         int i, count = 0;
1500
1501         if (IS_G4X(dev_priv)) {
1502                 divisor = gen4_dpll;
1503                 count = ARRAY_SIZE(gen4_dpll);
1504         } else if (HAS_PCH_SPLIT(dev_priv)) {
1505                 divisor = pch_dpll;
1506                 count = ARRAY_SIZE(pch_dpll);
1507         } else if (IS_CHERRYVIEW(dev_priv)) {
1508                 divisor = chv_dpll;
1509                 count = ARRAY_SIZE(chv_dpll);
1510         } else if (IS_VALLEYVIEW(dev_priv)) {
1511                 divisor = vlv_dpll;
1512                 count = ARRAY_SIZE(vlv_dpll);
1513         }
1514
1515         if (divisor && count) {
1516                 for (i = 0; i < count; i++) {
1517                         if (pipe_config->port_clock == divisor[i].clock) {
1518                                 pipe_config->dpll = divisor[i].dpll;
1519                                 pipe_config->clock_set = true;
1520                                 break;
1521                         }
1522                 }
1523         }
1524 }
1525
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, e.g. "162000, 270000, 540000".  Output is truncated (but always
 * NUL terminated) when it does not fit in @len bytes; a zero @len
 * leaves @str untouched.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* Writing even the NUL terminator would overflow a 0-byte buffer */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on encoding error (r < 0) or truncation; the cast
		 * makes the previously implicit signed/unsigned comparison
		 * explicit and keeps a negative r from wrapping.
		 */
		if (r < 0 || (size_t)r >= len)
			return;

		str += r;
		len -= r;
	}
}
1541
/*
 * Dump the source, sink and common link rate arrays to the KMS debug
 * log.  Bails out early when KMS debugging is disabled so the string
 * formatting is not done for nothing.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1561
1562 int
1563 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1564 {
1565         int len;
1566
1567         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1568         if (WARN_ON(len <= 0))
1569                 return 162000;
1570
1571         return intel_dp->common_rates[len - 1];
1572 }
1573
1574 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1575 {
1576         int i = intel_dp_rate_index(intel_dp->sink_rates,
1577                                     intel_dp->num_sink_rates, rate);
1578
1579         if (WARN_ON(i < 0))
1580                 i = 0;
1581
1582         return i;
1583 }
1584
1585 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1586                            uint8_t *link_bw, uint8_t *rate_select)
1587 {
1588         /* eDP 1.4 rate select method. */
1589         if (intel_dp->use_rate_select) {
1590                 *link_bw = 0;
1591                 *rate_select =
1592                         intel_dp_rate_select(intel_dp, port_clock);
1593         } else {
1594                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1595                 *rate_select = 0;
1596         }
1597 }
1598
1599 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1600                                 struct intel_crtc_state *pipe_config)
1601 {
1602         int bpp, bpc;
1603
1604         bpp = pipe_config->pipe_bpp;
1605         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1606
1607         if (bpc > 0)
1608                 bpp = min(bpp, 3*bpc);
1609
1610         /* For DP Compliance we override the computed bpp for the pipe */
1611         if (intel_dp->compliance.test_data.bpc != 0) {
1612                 pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
1613                 pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1614                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1615                               pipe_config->pipe_bpp);
1616         }
1617         return bpp;
1618 }
1619
1620 static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
1621                                        struct drm_display_mode *m2)
1622 {
1623         bool bres = false;
1624
1625         if (m1 && m2)
1626                 bres = (m1->hdisplay == m2->hdisplay &&
1627                         m1->hsync_start == m2->hsync_start &&
1628                         m1->hsync_end == m2->hsync_end &&
1629                         m1->htotal == m2->htotal &&
1630                         m1->vdisplay == m2->vdisplay &&
1631                         m1->vsync_start == m2->vsync_start &&
1632                         m1->vsync_end == m2->vsync_end &&
1633                         m1->vtotal == m2->vtotal);
1634         return bres;
1635 }
1636
1637 bool
1638 intel_dp_compute_config(struct intel_encoder *encoder,
1639                         struct intel_crtc_state *pipe_config,
1640                         struct drm_connector_state *conn_state)
1641 {
1642         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1643         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1644         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1645         enum port port = encoder->port;
1646         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1647         struct intel_connector *intel_connector = intel_dp->attached_connector;
1648         struct intel_digital_connector_state *intel_conn_state =
1649                 to_intel_digital_connector_state(conn_state);
1650         int lane_count, clock;
1651         int min_lane_count = 1;
1652         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1653         /* Conveniently, the link BW constants become indices with a shift...*/
1654         int min_clock = 0;
1655         int max_clock;
1656         int bpp, mode_rate;
1657         int link_avail, link_clock;
1658         int common_len;
1659         uint8_t link_bw, rate_select;
1660         bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
1661                                            DP_DPCD_QUIRK_LIMITED_M_N);
1662
1663         common_len = intel_dp_common_len_rate_limit(intel_dp,
1664                                                     intel_dp->max_link_rate);
1665
1666         /* No common link rates between source and sink */
1667         WARN_ON(common_len <= 0);
1668
1669         max_clock = common_len - 1;
1670
1671         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1672                 pipe_config->has_pch_encoder = true;
1673
1674         pipe_config->has_drrs = false;
1675         if (IS_G4X(dev_priv) || port == PORT_A)
1676                 pipe_config->has_audio = false;
1677         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1678                 pipe_config->has_audio = intel_dp->has_audio;
1679         else
1680                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1681
1682         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1683                 struct drm_display_mode *panel_mode =
1684                         intel_connector->panel.alt_fixed_mode;
1685                 struct drm_display_mode *req_mode = &pipe_config->base.mode;
1686
1687                 if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
1688                         panel_mode = intel_connector->panel.fixed_mode;
1689
1690                 drm_mode_debug_printmodeline(panel_mode);
1691
1692                 intel_fixed_panel_mode(panel_mode, adjusted_mode);
1693
1694                 if (INTEL_GEN(dev_priv) >= 9) {
1695                         int ret;
1696                         ret = skl_update_scaler_crtc(pipe_config);
1697                         if (ret)
1698                                 return ret;
1699                 }
1700
1701                 if (HAS_GMCH_DISPLAY(dev_priv))
1702                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1703                                                  conn_state->scaling_mode);
1704                 else
1705                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1706                                                 conn_state->scaling_mode);
1707         }
1708
1709         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1710             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1711                 return false;
1712
1713         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1714                 return false;
1715
1716         /* Use values requested by Compliance Test Request */
1717         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1718                 int index;
1719
1720                 /* Validate the compliance test data since max values
1721                  * might have changed due to link train fallback.
1722                  */
1723                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1724                                                intel_dp->compliance.test_lane_count)) {
1725                         index = intel_dp_rate_index(intel_dp->common_rates,
1726                                                     intel_dp->num_common_rates,
1727                                                     intel_dp->compliance.test_link_rate);
1728                         if (index >= 0)
1729                                 min_clock = max_clock = index;
1730                         min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1731                 }
1732         }
1733         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1734                       "max bw %d pixel clock %iKHz\n",
1735                       max_lane_count, intel_dp->common_rates[max_clock],
1736                       adjusted_mode->crtc_clock);
1737
1738         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1739          * bpc in between. */
1740         bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1741         if (intel_dp_is_edp(intel_dp)) {
1742
1743                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1744                 if (intel_connector->base.display_info.bpc == 0 &&
1745                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1746                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1747                                       dev_priv->vbt.edp.bpp);
1748                         bpp = dev_priv->vbt.edp.bpp;
1749                 }
1750
1751                 /*
1752                  * Use the maximum clock and number of lanes the eDP panel
1753                  * advertizes being capable of. The panels are generally
1754                  * designed to support only a single clock and lane
1755                  * configuration, and typically these values correspond to the
1756                  * native resolution of the panel.
1757                  */
1758                 min_lane_count = max_lane_count;
1759                 min_clock = max_clock;
1760         }
1761
1762         for (; bpp >= 6*3; bpp -= 2*3) {
1763                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1764                                                    bpp);
1765
1766                 for (clock = min_clock; clock <= max_clock; clock++) {
1767                         for (lane_count = min_lane_count;
1768                                 lane_count <= max_lane_count;
1769                                 lane_count <<= 1) {
1770
1771                                 link_clock = intel_dp->common_rates[clock];
1772                                 link_avail = intel_dp_max_data_rate(link_clock,
1773                                                                     lane_count);
1774
1775                                 if (mode_rate <= link_avail) {
1776                                         goto found;
1777                                 }
1778                         }
1779                 }
1780         }
1781
1782         return false;
1783
1784 found:
1785         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1786                 /*
1787                  * See:
1788                  * CEA-861-E - 5.1 Default Encoding Parameters
1789                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1790                  */
1791                 pipe_config->limited_color_range =
1792                         bpp != 18 &&
1793                         drm_default_rgb_quant_range(adjusted_mode) ==
1794                         HDMI_QUANTIZATION_RANGE_LIMITED;
1795         } else {
1796                 pipe_config->limited_color_range =
1797                         intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
1798         }
1799
1800         pipe_config->lane_count = lane_count;
1801
1802         pipe_config->pipe_bpp = bpp;
1803         pipe_config->port_clock = intel_dp->common_rates[clock];
1804
1805         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1806                               &link_bw, &rate_select);
1807
1808         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1809                       link_bw, rate_select, pipe_config->lane_count,
1810                       pipe_config->port_clock, bpp);
1811         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1812                       mode_rate, link_avail);
1813
1814         intel_link_compute_m_n(bpp, lane_count,
1815                                adjusted_mode->crtc_clock,
1816                                pipe_config->port_clock,
1817                                &pipe_config->dp_m_n,
1818                                reduce_m_n);
1819
1820         if (intel_connector->panel.downclock_mode != NULL &&
1821                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1822                         pipe_config->has_drrs = true;
1823                         intel_link_compute_m_n(bpp, lane_count,
1824                                 intel_connector->panel.downclock_mode->clock,
1825                                 pipe_config->port_clock,
1826                                 &pipe_config->dp_m2_n2,
1827                                 reduce_m_n);
1828         }
1829
1830         /*
1831          * DPLL0 VCO may need to be adjusted to get the correct
1832          * clock for eDP. This will affect cdclk as well.
1833          */
1834         if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
1835                 int vco;
1836
1837                 switch (pipe_config->port_clock / 2) {
1838                 case 108000:
1839                 case 216000:
1840                         vco = 8640000;
1841                         break;
1842                 default:
1843                         vco = 8100000;
1844                         break;
1845                 }
1846
1847                 to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
1848         }
1849
1850         if (!HAS_DDI(dev_priv))
1851                 intel_dp_set_clock(encoder, pipe_config);
1852
1853         intel_psr_compute_config(intel_dp, pipe_config);
1854
1855         return true;
1856 }
1857
1858 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1859                               int link_rate, uint8_t lane_count,
1860                               bool link_mst)
1861 {
1862         intel_dp->link_rate = link_rate;
1863         intel_dp->lane_count = lane_count;
1864         intel_dp->link_mst = link_mst;
1865 }
1866
/*
 * Program the DP port register (intel_dp->DP shadow) for the given crtc
 * state, without enabling the port yet.  Handles the per-platform register
 * layout differences (IBX/CPU vs IVB CPU port A vs CPT PCH) and caches the
 * computed value in intel_dp->DP for the actual enable later.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
                             const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
                                 pipe_config->lane_count,
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev_priv) && port == PORT_A) {
                /* IVB CPU eDP: sync polarity, enhanced framing and pipe
                 * select all live in the port register itself. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                /* CPT PCH ports: enhanced framing is configured via
                 * TRANS_DP_CTL rather than the port register. */
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / CPU ports */
                if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev_priv))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1950
1951 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1952 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1953
1954 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1955 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
1956
1957 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1958 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1959
1960 static void intel_pps_verify_state(struct intel_dp *intel_dp);
1961
1962 static void wait_panel_status(struct intel_dp *intel_dp,
1963                                        u32 mask,
1964                                        u32 value)
1965 {
1966         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1967         i915_reg_t pp_stat_reg, pp_ctrl_reg;
1968
1969         lockdep_assert_held(&dev_priv->pps_mutex);
1970
1971         intel_pps_verify_state(intel_dp);
1972
1973         pp_stat_reg = _pp_stat_reg(intel_dp);
1974         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1975
1976         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1977                         mask, value,
1978                         I915_READ(pp_stat_reg),
1979                         I915_READ(pp_ctrl_reg));
1980
1981         if (intel_wait_for_register(dev_priv,
1982                                     pp_stat_reg, mask, value,
1983                                     5000))
1984                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1985                                 I915_READ(pp_stat_reg),
1986                                 I915_READ(pp_ctrl_reg));
1987
1988         DRM_DEBUG_KMS("Wait complete\n");
1989 }
1990
/* Wait for the power sequencer to report the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1996
/* Wait for the power sequencer to report the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2002
2003 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2004 {
2005         ktime_t panel_power_on_time;
2006         s64 panel_power_off_duration;
2007
2008         DRM_DEBUG_KMS("Wait for panel power cycle\n");
2009
2010         /* take the difference of currrent time and panel power off time
2011          * and then make panel wait for t11_t12 if needed. */
2012         panel_power_on_time = ktime_get_boottime();
2013         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2014
2015         /* When we disable the VDD override bit last we have to do the manual
2016          * wait. */
2017         if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2018                 wait_remaining_ms_from_jiffies(jiffies,
2019                                        intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2020
2021         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2022 }
2023
/* Honour the panel-power-on -> backlight-on delay (t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
2029
/* Honour the backlight-off -> panel-power-off delay (t9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
2035
2036 /* Read the current pp_control value, unlocking the register if it
2037  * is locked
2038  */
2039
2040 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2041 {
2042         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2043         u32 control;
2044
2045         lockdep_assert_held(&dev_priv->pps_mutex);
2046
2047         control = I915_READ(_pp_ctrl_reg(intel_dp));
2048         if (WARN_ON(!HAS_DDI(dev_priv) &&
2049                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2050                 control &= ~PANEL_UNLOCK_MASK;
2051                 control |= PANEL_UNLOCK_REGS;
2052         }
2053         return control;
2054 }
2055
2056 /*
2057  * Must be paired with edp_panel_vdd_off().
2058  * Must hold pps_mutex around the whole on/off sequence.
2059  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2060  */
2061 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2062 {
2063         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2064         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2065         u32 pp;
2066         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2067         bool need_to_disable = !intel_dp->want_panel_vdd;
2068
2069         lockdep_assert_held(&dev_priv->pps_mutex);
2070
2071         if (!intel_dp_is_edp(intel_dp))
2072                 return false;
2073
2074         cancel_delayed_work(&intel_dp->panel_vdd_work);
2075         intel_dp->want_panel_vdd = true;
2076
2077         if (edp_have_panel_vdd(intel_dp))
2078                 return need_to_disable;
2079
2080         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
2081
2082         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2083                       port_name(intel_dig_port->base.port));
2084
2085         if (!edp_have_panel_power(intel_dp))
2086                 wait_panel_power_cycle(intel_dp);
2087
2088         pp = ironlake_get_pp_control(intel_dp);
2089         pp |= EDP_FORCE_VDD;
2090
2091         pp_stat_reg = _pp_stat_reg(intel_dp);
2092         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2093
2094         I915_WRITE(pp_ctrl_reg, pp);
2095         POSTING_READ(pp_ctrl_reg);
2096         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2097                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2098         /*
2099          * If the panel wasn't on, delay before accessing aux channel
2100          */
2101         if (!edp_have_panel_power(intel_dp)) {
2102                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2103                               port_name(intel_dig_port->base.port));
2104                 msleep(intel_dp->panel_power_up_delay);
2105         }
2106
2107         return need_to_disable;
2108 }
2109
2110 /*
2111  * Must be paired with intel_edp_panel_vdd_off() or
2112  * intel_edp_panel_off().
2113  * Nested calls to these functions are not allowed since
2114  * we drop the lock. Caller must use some higher level
2115  * locking to prevent nested calls from other threads.
2116  */
2117 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2118 {
2119         bool vdd;
2120
2121         if (!intel_dp_is_edp(intel_dp))
2122                 return;
2123
2124         pps_lock(intel_dp);
2125         vdd = edp_panel_vdd_on(intel_dp);
2126         pps_unlock(intel_dp);
2127
2128         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2129              port_name(dp_to_dig_port(intel_dp)->base.port));
2130 }
2131
/*
 * Immediately drop the VDD force bit and release the aux power domain
 * reference taken by edp_panel_vdd_on().  Records the power-off timestamp
 * if panel power is also off, so the t11_t12 cycle delay can be enforced.
 * Caller must hold pps_mutex; want_panel_vdd must already be false.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        /* Nothing to do if VDD is already off. */
        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->base.port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();

        /* Drop the reference taken when VDD was forced on. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2168
/*
 * Delayed worker that drops VDD once nobody wants it anymore.  A racing
 * edp_panel_vdd_on() may have set want_panel_vdd again by the time we run,
 * in which case this is a no-op.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
2179
2180 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2181 {
2182         unsigned long delay;
2183
2184         /*
2185          * Queue the timer to fire a long time from now (relative to the power
2186          * down delay) to keep the panel power up across a sequence of
2187          * operations.
2188          */
2189         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2190         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2191 }
2192
2193 /*
2194  * Must be paired with edp_panel_vdd_on().
2195  * Must hold pps_mutex around the whole on/off sequence.
2196  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2197  */
2198 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2199 {
2200         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2201
2202         lockdep_assert_held(&dev_priv->pps_mutex);
2203
2204         if (!intel_dp_is_edp(intel_dp))
2205                 return;
2206
2207         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2208              port_name(dp_to_dig_port(intel_dp)->base.port));
2209
2210         intel_dp->want_panel_vdd = false;
2211
2212         if (sync)
2213                 edp_panel_vdd_off_sync(intel_dp);
2214         else
2215                 edp_panel_vdd_schedule_off(intel_dp);
2216 }
2217
/*
 * Turn the eDP panel power on via the power sequencer and wait until it
 * reports fully-on.  Includes the ILK (gen5) workaround of dropping
 * PANEL_POWER_RESET around the power-up sequence.
 * Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->base.port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->base.port)))
                return;

        /* Enforce the t11_t12 power cycle delay before powering back up. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev_priv)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= PANEL_POWER_ON;
        if (!IS_GEN5(dev_priv))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay (wait_backlight_on()). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev_priv)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
2264
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2274
2275
/*
 * Turn the eDP panel power off (clearing panel power, reset override, VDD
 * force and backlight enable in one write), wait until the sequencer
 * reports fully-off, and release the aux power domain reference that was
 * taken when VDD was enabled.
 * Caller must hold pps_mutex and must have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->base.port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->base.port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_off(intel_dp);
        /* Timestamp for the t11_t12 delay in wait_panel_power_cycle(). */
        intel_dp->panel_power_off_time = ktime_get_boottime();

        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2312
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2322
2323 /* Enable backlight in the panel power control. */
2324 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2325 {
2326         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2327         u32 pp;
2328         i915_reg_t pp_ctrl_reg;
2329
2330         /*
2331          * If we enable the backlight right away following a panel power
2332          * on, we may see slight flicker as the panel syncs with the eDP
2333          * link.  So delay a bit to make sure the image is solid before
2334          * allowing it to appear.
2335          */
2336         wait_backlight_on(intel_dp);
2337
2338         pps_lock(intel_dp);
2339
2340         pp = ironlake_get_pp_control(intel_dp);
2341         pp |= EDP_BLC_ENABLE;
2342
2343         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2344
2345         I915_WRITE(pp_ctrl_reg, pp);
2346         POSTING_READ(pp_ctrl_reg);
2347
2348         pps_unlock(intel_dp);
2349 }
2350
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the panel power control enable bit. */
        intel_panel_enable_backlight(crtc_state, conn_state);
        _intel_edp_backlight_on(intel_dp);
}
2365
2366 /* Disable backlight in the panel power control. */
2367 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2368 {
2369         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2370         u32 pp;
2371         i915_reg_t pp_ctrl_reg;
2372
2373         if (!intel_dp_is_edp(intel_dp))
2374                 return;
2375
2376         pps_lock(intel_dp);
2377
2378         pp = ironlake_get_pp_control(intel_dp);
2379         pp &= ~EDP_BLC_ENABLE;
2380
2381         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2382
2383         I915_WRITE(pp_ctrl_reg, pp);
2384         POSTING_READ(pp_ctrl_reg);
2385
2386         pps_unlock(intel_dp);
2387
2388         intel_dp->last_backlight_off = jiffies;
2389         edp_wait_backlight_off(intel_dp);
2390 }
2391
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* Reverse of intel_edp_backlight_on(): PP control first, PWM last. */
        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(old_conn_state);
}
2405
2406 /*
2407  * Hook for controlling the panel power control backlight through the bl_power
2408  * sysfs attribute. Take care to handle multiple calls.
2409  */
2410 static void intel_edp_backlight_power(struct intel_connector *connector,
2411                                       bool enable)
2412 {
2413         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2414         bool is_enabled;
2415
2416         pps_lock(intel_dp);
2417         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2418         pps_unlock(intel_dp);
2419
2420         if (is_enabled == enable)
2421                 return;
2422
2423         DRM_DEBUG_KMS("panel power control backlight %s\n",
2424                       enable ? "enable" : "disable");
2425
2426         if (enable)
2427                 _intel_edp_backlight_on(intel_dp);
2428         else
2429                 _intel_edp_backlight_off(intel_dp);
2430 }
2431
/*
 * WARN if the DP port enable bit does not match the expected state.
 * Used as a sanity check around eDP PLL transitions.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2444
/*
 * WARN if the eDP PLL enable bit (in DP_A) does not match the expected
 * state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2455
/*
 * Enable the eDP PLL on port A (ILK-IVB). The pipe and port must be
 * disabled and the PLL off on entry; the PLL frequency is programmed
 * first, then the enable bit is set in a separate write.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Select the PLL frequency before enabling the PLL. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2495
/*
 * Disable the eDP PLL on port A. The pipe and port must already be
 * disabled, and the PLL must currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2514
2515 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2516 {
2517         /*
2518          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2519          * be capable of signalling downstream hpd with a long pulse.
2520          * Whether or not that means D3 is safe to use is not clear,
2521          * but let's assume so until proven otherwise.
2522          *
2523          * FIXME should really check all downstream ports...
2524          */
2525         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2526                 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2527                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2528 }
2529
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink awake if downstream HPD depends on it. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			/* drm_dp_dpcd_writeb() returns 1 on success */
			if (ret == 1)
				break;
			msleep(1);
		}

		/* An active LSPCON must settle back into PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2568
/*
 * Read out whether the DP port is enabled and, if so, which pipe it is
 * driving. Returns false if the power domain is off or the port is
 * disabled. Pipe decoding differs per platform (CPT transcoder
 * registers, CHV/gen7 port A, or legacy bits in the port register).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	u32 tmp;
	bool ret;

	/* Bail if we cannot safely touch the hardware. */
	if (!intel_display_power_get_if_enabled(dev_priv,
						encoder->power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe->port mapping lives in TRANS_DP_CTL. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev_priv)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	/* Balance the conditional power reference taken above. */
	intel_display_power_put(dev_priv, encoder->power_domain);

	return ret;
}
2619
/*
 * Fill in pipe_config by reading back the current DP port state from
 * the hardware: output type, audio, sync polarity, color range, lane
 * count, link M/N values, port clock and dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	/* Port A never carries audio. */
	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg elsewhere. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	/* Hardware field is lane count minus one. */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2703
/*
 * Common DP disable path: audio off, then panel/backlight/sink power
 * down in the required order. Platform hooks below add the
 * port/PLL-specific steps around this.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
2721
/* g4x: common disable, then the port must go down before the pipe. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);

	/* disable the port before the pipe on g4x */
	intel_dp_link_down(encoder, old_crtc_state);
}
2731
/*
 * ilk+: just the common disable here; the port is taken down in the
 * post_disable hook (after the pipe), see ilk_post_disable_dp().
 */
static void ilk_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2738
/* vlv/chv: tear down PSR before the common disable sequence. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_psr_disable(intel_dp, old_crtc_state);

	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2749
/*
 * ilk+: bring the link down after the pipe is off, and turn off the
 * eDP PLL when this was port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
2763
/* vlv: bring the link down after the pipe has been disabled. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
2770
/*
 * chv: bring the link down, then reset the PHY data lanes via the
 * sideband interface (which requires sb_lock).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2786
/*
 * Encode the requested DP training pattern into the platform-specific
 * register bits. On DDI platforms this writes DP_TP_CTL directly; on
 * older hardware it only updates *DP, which the caller must then write
 * to the port register itself.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	if (HAS_DDI(dev_priv)) {
		/* DDI: training pattern is controlled via DP_TP_CTL. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		/* CPT / IVB port A: CPT-style link train bits in *DP. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* This hardware cannot emit TPS3; fall back to TPS2. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy (g4x/vlv/chv) link train bits. */
		if (IS_CHERRYVIEW(dev_priv))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports TPS3 here. */
			if (IS_CHERRYVIEW(dev_priv)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2873
/*
 * Enable the DP port with training pattern 1 programmed, following the
 * two-write sequence VLV/CHV require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2896
/*
 * Common DP enable path: set up the power sequencer (vlv/chv), enable
 * the port, power up the panel, wait for the PHY, then run link
 * training and finally enable audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* The port must still be disabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_init_panel_power_sequencer(encoder, pipe_config);

	intel_dp_enable_port(intel_dp, pipe_config);

	/* Power up the panel with vdd held across the transition. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* On CHV only wait for the lanes actually in use. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
2943
/* g4x/ilk: full enable sequence, then turn on the eDP backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
2951
/*
 * vlv/chv: the port itself is enabled in the pre_enable hook (see
 * vlv_pre_enable_dp/chv_pre_enable_dp); here only backlight and PSR
 * remain.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(pipe_config, conn_state);
	intel_psr_enable(intel_dp, pipe_config);
}
2961
/*
 * g4x/ilk pre-enable: program the port registers, and enable the eDP
 * PLL first when this is port A.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}
2975
/*
 * Logically disconnect the power sequencer currently assigned to this
 * port: sync off vdd, clear the PPS port select, and mark pps_pipe
 * invalid. Must only be called while the port is not active.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipes A and B have a power sequencer on vlv/chv. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3006
/*
 * Take the given pipe's power sequencer away from whichever (e)DP port
 * currently owns it, so the caller can claim it. Caller must hold
 * pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only DP/eDP encoders can own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->base.port;

		/* Stealing from a port that is actively using it is a bug. */
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3039
/*
 * Assign this CRTC's power sequencer to the port being enabled:
 * detach any previously-used sequencer, steal the new pipe's sequencer
 * from other ports, and (for eDP only) claim and program it. Caller
 * must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Only eDP needs a power sequencer at all. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3082
/* vlv: program the PHY, then run the common DP enable sequence. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
3091
/* vlv: set up port registers and the PHY before the PLL is enabled. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3100
/*
 * chv: program the PHY, run the common DP enable, then drop the
 * second-common-lane override now that the lane is self-sustaining.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3112
/* chv: set up port registers and the PHY before the PLL is enabled. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3121
/* chv: PHY cleanup after the PLL has been disabled. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3128
3129 /*
3130  * Fetch AUX CH registers 0x202 - 0x207 which contain
3131  * link status information
3132  */
3133 bool
3134 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3135 {
3136         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3137                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3138 }
3139
3140 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
3141 {
3142         uint8_t psr_caps = 0;
3143
3144         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
3145                 return false;
3146         return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
3147 }
3148
3149 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3150 {
3151         uint8_t dprx = 0;
3152
3153         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
3154                               &dprx) != 1)
3155                 return false;
3156         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3157 }
3158
3159 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
3160 {
3161         uint8_t alpm_caps = 0;
3162
3163         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
3164                               &alpm_caps) != 1)
3165                 return false;
3166         return alpm_caps & DP_ALPM_CAP;
3167 }
3168
/*
 * These are source-specific values: the maximum voltage swing level the
 * source hardware can drive during link training, per platform/port.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+: the DDI code knows the buffer translation limits. */
		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
		return intel_ddi_dp_voltage_max(encoder);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3188
/*
 * Maximum pre-emphasis level (DP_TRAIN_PRE_EMPH_LEVEL_*) the source can
 * drive for the given voltage swing. The mappings are per-platform
 * hardware limits; platform checks are ordered newest first.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+: higher swing leaves less pre-emphasis headroom */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW DDI: same swing/emphasis trade-off as gen9 */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* VLV/CHV: limits enforced by the DPIO PHY tables below */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		/* IVB eDP (port A): lower overall pre-emphasis ceiling */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* All remaining (older) platforms */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3256
/*
 * Translate the negotiated vswing/pre-emphasis combination from
 * train_set[0] into VLV DPIO PHY register values and program them via
 * vlv_set_phy_signal_level(). The demph/preemph/uniqtranscale constants
 * are opaque, per-combination PHY settings; combinations the hardware
 * does not support bail out early with 0. Because the levels are
 * programmed through the PHY rather than the DP port register, the
 * returned signal-levels value is always 0.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3342
/*
 * CHV counterpart of vlv_signal_levels(): map train_set[0] onto CHV PHY
 * de-emphasis/margin values and program them via
 * chv_set_phy_signal_level(). uniq_trans_scale is only enabled for the
 * maximum-swing / no-pre-emphasis combination. Unsupported combinations
 * bail out early; the function always returns 0 because the levels are
 * programmed through the PHY, not the DP port register.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3425
3426 static uint32_t
3427 gen4_signal_levels(uint8_t train_set)
3428 {
3429         uint32_t        signal_levels = 0;
3430
3431         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3432         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3433         default:
3434                 signal_levels |= DP_VOLTAGE_0_4;
3435                 break;
3436         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3437                 signal_levels |= DP_VOLTAGE_0_6;
3438                 break;
3439         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3440                 signal_levels |= DP_VOLTAGE_0_8;
3441                 break;
3442         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3443                 signal_levels |= DP_VOLTAGE_1_2;
3444                 break;
3445         }
3446         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3447         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3448         default:
3449                 signal_levels |= DP_PRE_EMPHASIS_0;
3450                 break;
3451         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3452                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3453                 break;
3454         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3455                 signal_levels |= DP_PRE_EMPHASIS_6;
3456                 break;
3457         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3458                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3459                 break;
3460         }
3461         return signal_levels;
3462 }
3463
3464 /* Gen6's DP voltage swing and pre-emphasis control */
3465 static uint32_t
3466 gen6_edp_signal_levels(uint8_t train_set)
3467 {
3468         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3469                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3470         switch (signal_levels) {
3471         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3472         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3473                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3474         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3475                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3476         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3477         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3478                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3479         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3480         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3481                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3482         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3483         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3484                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3485         default:
3486                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3487                               "0x%x\n", signal_levels);
3488                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3489         }
3490 }
3491
3492 /* Gen7's DP voltage swing and pre-emphasis control */
3493 static uint32_t
3494 gen7_edp_signal_levels(uint8_t train_set)
3495 {
3496         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3497                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3498         switch (signal_levels) {
3499         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3500                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3501         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3502                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3503         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3504                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3505
3506         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3507                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3508         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3509                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3510
3511         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3513         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3514                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3515
3516         default:
3517                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3518                               "0x%x\n", signal_levels);
3519                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3520         }
3521 }
3522
/*
 * Pick the platform-appropriate signal-level encoding for the current
 * train_set[0] and write it to the DP port register. For platforms that
 * program levels through a PHY or DDI buffer translations elsewhere
 * (bxt/chv/vlv paths), mask stays 0 and the port-register merge below is
 * a no-op; the "Using signal levels" message is therefore only printed
 * when mask is non-zero.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	/* Platform dispatch, newest first; keep the order when editing. */
	if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Merge the new level bits into the cached port register value. */
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3566
/*
 * Update the cached DP port register value for the requested training
 * pattern (via the platform hook _intel_dp_set_link_train) and flush it
 * to the hardware. dev_priv is consumed by the I915_WRITE/POSTING_READ
 * macros below.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Mutates intel_dp->DP in place to carry the new pattern bits. */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3580
3581 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3582 {
3583         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3584         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3585         enum port port = intel_dig_port->base.port;
3586         uint32_t val;
3587
3588         if (!HAS_DDI(dev_priv))
3589                 return;
3590
3591         val = I915_READ(DP_TP_CTL(port));
3592         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3593         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3594         I915_WRITE(DP_TP_CTL(port), val);
3595
3596         /*
3597          * On PORT_A we can have only eDP in SST mode. There the only reason
3598          * we need to set idle transmission mode is to work around a HW issue
3599          * where we enable the pipe while not in idle link-training mode.
3600          * In this case there is requirement to wait for a minimum number of
3601          * idle patterns to be sent.
3602          */
3603         if (port == PORT_A)
3604                 return;
3605
3606         if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3607                                     DP_TP_STATUS_IDLE_DONE,
3608                                     DP_TP_STATUS_IDLE_DONE,
3609                                     1))
3610                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3611 }
3612
/*
 * Tear down the DP link on non-DDI platforms: drop to the idle training
 * pattern, disable the port, apply the IBX transcoder-A workaround where
 * needed, and invalidate the cached PPS pipe on VLV/CHV. The register
 * write ordering here is deliberate; do not reorder.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms use a different teardown path entirely. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First step: switch to the idle training pattern, using the
	 * CPT/CHV/gen4 variant of the link-train field as appropriate.
	 */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Second step: disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Honour the panel's power-down delay before callers proceed. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	/* VLV/CHV track the PPS pipe; mark it invalid under the pps lock. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
}
3687
3688 bool
3689 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3690 {
3691         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3692                              sizeof(intel_dp->dpcd)) < 0)
3693                 return false; /* aux transfer failed */
3694
3695         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3696
3697         return intel_dp->dpcd[DP_DPCD_REV] != 0;
3698 }
3699
/*
 * One-time eDP sink capability probe: base DPCD, PSR/PSR2 caps, the eDP
 * display-control registers, and the eDP 1.4+ supported link rates.
 * Populates intel_dp and dev_priv->psr state; returns false if the base
 * DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/* DPCD 1.1+ may advertise no-aux-handshake link training. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/* Check if the panel supports PSR */
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
			 intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	/* PSR2 is only considered on gen9+ sources. */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		/* Treat a failed readb as "no aux frame sync". */
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				      &frame_sync_cap) != 1)
			frame_sync_cap = 0;
		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");

		if (dev_priv->psr.psr2_support) {
			dev_priv->psr.y_cord_support =
				intel_dp_get_y_cord_status(intel_dp);
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
		}

	}

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated; stop at the first 0 entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	return true;
}
3807
3808
/*
 * (Re-)probe the sink over aux: refresh the base DPCD, the sink count
 * and, for branch devices, the downstream port info. Returns false when
 * the sink is gone or unusable (aux failure, or a dongle with zero
 * downstream sinks). Called on hotplug and short-pulse re-detection.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	u8 sink_count;

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	/* DPCD 1.0 branch devices carry no per-port downstream info. */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3856
3857 static bool
3858 intel_dp_can_mst(struct intel_dp *intel_dp)
3859 {
3860         u8 mstm_cap;
3861
3862         if (!i915_modparams.enable_dp_mst)
3863                 return false;
3864
3865         if (!intel_dp->can_mst)
3866                 return false;
3867
3868         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3869                 return false;
3870
3871         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
3872                 return false;
3873
3874         return mstm_cap & DP_MST_CAP;
3875 }
3876
3877 static void
3878 intel_dp_configure_mst(struct intel_dp *intel_dp)
3879 {
3880         if (!i915_modparams.enable_dp_mst)
3881                 return;
3882
3883         if (!intel_dp->can_mst)
3884                 return;
3885
3886         intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3887
3888         if (intel_dp->is_mst)
3889                 DRM_DEBUG_KMS("Sink is MST capable\n");
3890         else
3891                 DRM_DEBUG_KMS("Sink is not MST capable\n");
3892
3893         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3894                                         intel_dp->is_mst);
3895 }
3896
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START in the sink and
 * wait (up to 10 vblanks) for the sink's CRC count to drain to zero.
 * When disable_wa is set, re-enables IPS that the start path turned off.
 * Returns 0 on success, -EIO on aux failure, -ETIMEDOUT if the counter
 * never reaches zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state, bool disable_wa)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write DP_TEST_SINK to clear only the start bit. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink's CRC count drains. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	/* Undo the IPS workaround from the start path, even on error. */
	if (disable_wa)
		hsw_enable_ips(crtc_state);
	return ret;
}
3942
/*
 * Start sink CRC calculation by setting DP_TEST_SINK_START, after
 * verifying the sink advertises CRC support in DP_TEST_SINK_MISC.
 *
 * IPS is disabled first as a workaround; it is re-enabled either here on
 * failure, or later by intel_dp_sink_crc_stop(disable_wa=true).
 *
 * Returns 0 on success, -EIO on AUX failure, -ENOTTY if the sink does
 * not support CRC, or an error from stopping a stale calculation.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
                                   struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous calculation may still be running; stop it first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
		if (ret)
			return ret;
	}

	hsw_disable_ips(crtc_state);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Failed to start: undo the IPS workaround before bailing. */
		hsw_enable_ips(crtc_state);
		return -EIO;
	}

	/* Give the sink a vblank to begin accumulating CRCs. */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3978
/*
 * Read a 6-byte sink CRC (starting at DP_TEST_CRC_R_CR) into @crc.
 *
 * Starts sink CRC calculation, waits up to 6 vblanks for the sink's CRC
 * test counter to become non-zero, reads the CRC bytes, and finally stops
 * the calculation again (which also re-enables IPS).
 *
 * Returns 0 on success or a negative error code (-EIO, -ETIMEDOUT, or an
 * error from the start path).
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
	if (ret)
		return ret;

	/* Poll per vblank until the sink reports at least one computed CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Always stop the calculation; also re-enables IPS (disable_wa). */
	intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
	return ret;
}
4019
4020 static bool
4021 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4022 {
4023         return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
4024                                  sink_irq_vector) == 1;
4025 }
4026
4027 static bool
4028 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4029 {
4030         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4031                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4032                 DP_DPRX_ESI_LEN;
4033 }
4034
/*
 * Handle an automated LINK_TRAINING test request (DP CTS 1.2, 4.3.1.11).
 *
 * Reads the requested lane count and link rate from the sink's test
 * registers, validates them against our own link parameters, and caches
 * them in intel_dp->compliance for the subsequent compliance modeset.
 *
 * Returns DP_TEST_ACK if the request was accepted, DP_TEST_NAK otherwise.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	uint8_t test_lane_count, test_link_bw;

	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Convert the BW code (e.g. 0x0a) to a link rate in kHz. */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4071
/*
 * Handle an automated TEST_PATTERN (video pattern) request (DP CTS 3.1.5).
 *
 * Reads the requested pattern, geometry and pixel format from the sink's
 * test registers and NAKs anything other than an RGB, VESA-range color
 * ramp at 6 or 8 bpc. On success the parameters are cached in
 * intel_dp->compliance.test_data and test_active is set so userspace
 * does not interfere before the compliance modeset.
 *
 * Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_pattern;
	uint8_t test_misc;
	__be16 h_width, v_height;	/* big-endian as stored in DPCD */
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (full) dynamic range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4132
/*
 * Handle an automated EDID_READ test request (DP CTS 1.2 Core r1.1,
 * 4.2.2.x).
 *
 * If the earlier EDID probe failed, was corrupt, or saw too many I2C
 * defers, report the failsafe resolution; otherwise write the checksum
 * of the last EDID block back to DP_TEST_EDID_CHECKSUM and report the
 * preferred resolution. Either way test_active is set so userspace does
 * not interfere before the compliance modeset.
 *
 * Returns DP_TEST_ACK (optionally with DP_TEST_EDID_CHECKSUM_WRITE).
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4176
4177 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4178 {
4179         uint8_t test_result = DP_TEST_NAK;
4180         return test_result;
4181 }
4182
4183 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4184 {
4185         uint8_t response = DP_TEST_NAK;
4186         uint8_t request = 0;
4187         int status;
4188
4189         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4190         if (status <= 0) {
4191                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4192                 goto update_status;
4193         }
4194
4195         switch (request) {
4196         case DP_TEST_LINK_TRAINING:
4197                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4198                 response = intel_dp_autotest_link_training(intel_dp);
4199                 break;
4200         case DP_TEST_LINK_VIDEO_PATTERN:
4201                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4202                 response = intel_dp_autotest_video_pattern(intel_dp);
4203                 break;
4204         case DP_TEST_LINK_EDID_READ:
4205                 DRM_DEBUG_KMS("EDID test requested\n");
4206                 response = intel_dp_autotest_edid(intel_dp);
4207                 break;
4208         case DP_TEST_LINK_PHY_TEST_PATTERN:
4209                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4210                 response = intel_dp_autotest_phy_pattern(intel_dp);
4211                 break;
4212         default:
4213                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4214                 break;
4215         }
4216
4217         if (response & DP_TEST_ACK)
4218                 intel_dp->compliance.test_type = request;
4219
4220 update_status:
4221         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4222         if (status <= 0)
4223                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4224 }
4225
/*
 * Service MST sink IRQs: read the ESI block, retrain if channel EQ is no
 * longer ok, hand the ESI to the MST topology manager, ack the handled
 * bits back to the sink, and loop while new ESI events keep arriving.
 *
 * If the ESI read fails, MST is torn down and a hotplug event is sent so
 * the connector gets re-detected.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL if not in MST mode / the ESI read failed.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write up to 3 times if it comes up short. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4282
/*
 * Retrain the DP link on the currently attached CRTC.
 *
 * FIFO underrun reporting (CPU and, where present, PCH) is suppressed
 * around the retrain and for one further vblank, since retraining is
 * expected to cause transient underruns.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
4307
/*
 * Check the DP link status and retrain if channel EQ is no longer ok.
 *
 * Requires connection_mutex (and, once a CRTC is known, that CRTC's
 * mutex) to be held by the caller. Bails out silently when there is no
 * active CRTC, a commit is still in flight, or the cached link
 * parameters are no longer valid.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_connector_state *conn_state =
		intel_dp->attached_connector->base.state;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_ERROR("Failed to get link status\n");
		return;
	}

	if (!conn_state->crtc)
		return;

	WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex));

	if (!conn_state->crtc->state->active)
		return;

	/* Don't race a commit whose hardware programming isn't done yet. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return;

	/* Retrain if Channel EQ or CR not ok */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);

		intel_dp_retrain_link(intel_dp);
	}
}
4352
4353 /*
4354  * According to DP spec
4355  * 5.1.2:
4356  *  1. Read DPCD
4357  *  2. Configure link according to Receiver Capabilities
4358  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4359  *  4. Check link status on receipt of hot-plug interrupt
4360  *
4361  * intel_dp_short_pulse -  handles short pulse interrupts
4362  * when full detection is not required.
4363  * Returns %true if short pulse is handled and full detection
4364  * is NOT required and %false otherwise.
4365  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u8 sink_irq_vector = 0;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Automated test requests are handled here; CP and
		 * sink-specific IRQs are only logged. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	intel_dp_check_link_status(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
4418
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD.
 *
 * eDP and non-branch sinks are connected as soon as the DPCD reads back.
 * For branch devices, prefer SINK_COUNT when the branch is HPD-aware
 * (DPCD 1.1+), then fall back to MST capability, a DDC probe, and
 * finally per-port-type heuristics for ports that can't be probed
 * reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD: use the coarse downstream-port type field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4473
4474 static enum drm_connector_status
4475 edp_detect(struct intel_dp *intel_dp)
4476 {
4477         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
4478         enum drm_connector_status status;
4479
4480         status = intel_panel_detect(dev_priv);
4481         if (status == connector_status_unknown)
4482                 status = connector_status_connected;
4483
4484         return status;
4485 }
4486
4487 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4488 {
4489         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4490         u32 bit;
4491
4492         switch (encoder->hpd_pin) {
4493         case HPD_PORT_B:
4494                 bit = SDE_PORTB_HOTPLUG;
4495                 break;
4496         case HPD_PORT_C:
4497                 bit = SDE_PORTC_HOTPLUG;
4498                 break;
4499         case HPD_PORT_D:
4500                 bit = SDE_PORTD_HOTPLUG;
4501                 break;
4502         default:
4503                 MISSING_CASE(encoder->hpd_pin);
4504                 return false;
4505         }
4506
4507         return I915_READ(SDEISR) & bit;
4508 }
4509
4510 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
4511 {
4512         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4513         u32 bit;
4514
4515         switch (encoder->hpd_pin) {
4516         case HPD_PORT_B:
4517                 bit = SDE_PORTB_HOTPLUG_CPT;
4518                 break;
4519         case HPD_PORT_C:
4520                 bit = SDE_PORTC_HOTPLUG_CPT;
4521                 break;
4522         case HPD_PORT_D:
4523                 bit = SDE_PORTD_HOTPLUG_CPT;
4524                 break;
4525         default:
4526                 MISSING_CASE(encoder->hpd_pin);
4527                 return false;
4528         }
4529
4530         return I915_READ(SDEISR) & bit;
4531 }
4532
4533 static bool spt_digital_port_connected(struct intel_encoder *encoder)
4534 {
4535         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4536         u32 bit;
4537
4538         switch (encoder->hpd_pin) {
4539         case HPD_PORT_A:
4540                 bit = SDE_PORTA_HOTPLUG_SPT;
4541                 break;
4542         case HPD_PORT_E:
4543                 bit = SDE_PORTE_HOTPLUG_SPT;
4544                 break;
4545         default:
4546                 return cpt_digital_port_connected(encoder);
4547         }
4548
4549         return I915_READ(SDEISR) & bit;
4550 }
4551
4552 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4553 {
4554         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4555         u32 bit;
4556
4557         switch (encoder->hpd_pin) {
4558         case HPD_PORT_B:
4559                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4560                 break;
4561         case HPD_PORT_C:
4562                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4563                 break;
4564         case HPD_PORT_D:
4565                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4566                 break;
4567         default:
4568                 MISSING_CASE(encoder->hpd_pin);
4569                 return false;
4570         }
4571
4572         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4573 }
4574
4575 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
4576 {
4577         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4578         u32 bit;
4579
4580         switch (encoder->hpd_pin) {
4581         case HPD_PORT_B:
4582                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4583                 break;
4584         case HPD_PORT_C:
4585                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4586                 break;
4587         case HPD_PORT_D:
4588                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4589                 break;
4590         default:
4591                 MISSING_CASE(encoder->hpd_pin);
4592                 return false;
4593         }
4594
4595         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4596 }
4597
4598 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
4599 {
4600         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4601
4602         if (encoder->hpd_pin == HPD_PORT_A)
4603                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4604         else
4605                 return ibx_digital_port_connected(encoder);
4606 }
4607
4608 static bool snb_digital_port_connected(struct intel_encoder *encoder)
4609 {
4610         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4611
4612         if (encoder->hpd_pin == HPD_PORT_A)
4613                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4614         else
4615                 return cpt_digital_port_connected(encoder);
4616 }
4617
4618 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
4619 {
4620         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4621
4622         if (encoder->hpd_pin == HPD_PORT_A)
4623                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
4624         else
4625                 return cpt_digital_port_connected(encoder);
4626 }
4627
4628 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
4629 {
4630         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4631
4632         if (encoder->hpd_pin == HPD_PORT_A)
4633                 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
4634         else
4635                 return cpt_digital_port_connected(encoder);
4636 }
4637
4638 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
4639 {
4640         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4641         u32 bit;
4642
4643         switch (encoder->hpd_pin) {
4644         case HPD_PORT_A:
4645                 bit = BXT_DE_PORT_HP_DDIA;
4646                 break;
4647         case HPD_PORT_B:
4648                 bit = BXT_DE_PORT_HP_DDIB;
4649                 break;
4650         case HPD_PORT_C:
4651                 bit = BXT_DE_PORT_HP_DDIC;
4652                 break;
4653         default:
4654                 MISSING_CASE(encoder->hpd_pin);
4655                 return false;
4656         }
4657
4658         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4659 }
4660
4661 /*
4662  * intel_digital_port_connected - is the specified port connected?
4663  * @encoder: intel_encoder
4664  *
4665  * Return %true if port is connected, %false otherwise.
4666  */
4667 bool intel_digital_port_connected(struct intel_encoder *encoder)
4668 {
4669         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4670
4671         if (HAS_GMCH_DISPLAY(dev_priv)) {
4672                 if (IS_GM45(dev_priv))
4673                         return gm45_digital_port_connected(encoder);
4674                 else
4675                         return g4x_digital_port_connected(encoder);
4676         }
4677
4678         if (IS_GEN5(dev_priv))
4679                 return ilk_digital_port_connected(encoder);
4680         else if (IS_GEN6(dev_priv))
4681                 return snb_digital_port_connected(encoder);
4682         else if (IS_GEN7(dev_priv))
4683                 return ivb_digital_port_connected(encoder);
4684         else if (IS_GEN8(dev_priv))
4685                 return bdw_digital_port_connected(encoder);
4686         else if (IS_GEN9_LP(dev_priv))
4687                 return bxt_digital_port_connected(encoder);
4688         else
4689                 return spt_digital_port_connected(encoder);
4690 }
4691
4692 static struct edid *
4693 intel_dp_get_edid(struct intel_dp *intel_dp)
4694 {
4695         struct intel_connector *intel_connector = intel_dp->attached_connector;
4696
4697         /* use cached edid if we have one */
4698         if (intel_connector->edid) {
4699                 /* invalid edid */
4700                 if (IS_ERR(intel_connector->edid))
4701                         return NULL;
4702
4703                 return drm_edid_duplicate(intel_connector->edid);
4704         } else
4705                 return drm_get_edid(&intel_connector->base,
4706                                     &intel_dp->aux.ddc);
4707 }
4708
4709 static void
4710 intel_dp_set_edid(struct intel_dp *intel_dp)
4711 {
4712         struct intel_connector *intel_connector = intel_dp->attached_connector;
4713         struct edid *edid;
4714
4715         intel_dp_unset_edid(intel_dp);
4716         edid = intel_dp_get_edid(intel_dp);
4717         intel_connector->detect_edid = edid;
4718
4719         intel_dp->has_audio = drm_detect_monitor_audio(edid);
4720 }
4721
4722 static void
4723 intel_dp_unset_edid(struct intel_dp *intel_dp)
4724 {
4725         struct intel_connector *intel_connector = intel_dp->attached_connector;
4726
4727         kfree(intel_connector->detect_edid);
4728         intel_connector->detect_edid = NULL;
4729
4730         intel_dp->has_audio = false;
4731 }
4732
/*
 * Full connector probe, run for a long HPD pulse or a fresh detect.
 *
 * Must be called with connection_mutex held (asserted below); holds an
 * AUX power domain reference for the duration of the probe.
 *
 * Returns an enum drm_connector_status value.
 */
static int
intel_dp_long_pulse(struct intel_connector *connector)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
        enum drm_connector_status status;
        u8 sink_irq_vector = 0;

        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        /* Can't disconnect eDP, but you can close the lid... */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
                status = intel_dp_detect_dpcd(intel_dp);
        else
                status = connector_status_disconnected;

        if (status == connector_status_disconnected) {
                /* Sink is gone: forget any in-progress compliance test state. */
                memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

                if (intel_dp->is_mst) {
                        DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
                                      intel_dp->is_mst,
                                      intel_dp->mst_mgr.mst_state);
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
                }

                goto out;
        }

        /* Re-derive the link limits when requested (e.g. after a long HPD). */
        if (intel_dp->reset_link_params) {
                /* Initial max link lane count */
                intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

                /* Initial max link rate */
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

                intel_dp->reset_link_params = false;
        }

        intel_dp_print_rates(intel_dp);

        drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
                         drm_dp_is_branch(intel_dp->dpcd));

        intel_dp_configure_mst(intel_dp);

        if (intel_dp->is_mst) {
                /*
                 * If we are in MST mode then this connector
                 * won't appear connected or have anything
                 * with EDID on it
                 */
                status = connector_status_disconnected;
                goto out;
        } else {
                /*
                 * If display is now connected check links status,
                 * there has been known issues of link loss triggerring
                 * long pulse.
                 *
                 * Some sinks (eg. ASUS PB287Q) seem to perform some
                 * weird HPD ping pong during modesets. So we can apparently
                 * end up with HPD going low during a modeset, and then
                 * going back up soon after. And once that happens we must
                 * retrain the link to get a picture. That's in case no
                 * userspace component reacted to intermittent HPD dip.
                 */
                intel_dp_check_link_status(intel_dp);
        }

        /*
         * Clearing NACK and defer counts to get their exact values
         * while reading EDID which are required by Compliance tests
         * 4.2.2.4 and 4.2.2.5
         */
        intel_dp->aux.i2c_nack_count = 0;
        intel_dp->aux.i2c_defer_count = 0;

        intel_dp_set_edid(intel_dp);
        /* eDP is considered connected even without an EDID. */
        if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
                status = connector_status_connected;
        intel_dp->detect_done = true;

        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
            sink_irq_vector != 0) {
                /* Clear interrupt source */
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                   DP_DEVICE_SERVICE_IRQ_VECTOR,
                                   sink_irq_vector);

                if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
                        intel_dp_handle_test_request(intel_dp);
                if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }

out:
        /* MST keeps its EDID handling; otherwise drop the cache when gone. */
        if (status != connector_status_connected && !intel_dp->is_mst)
                intel_dp_unset_edid(intel_dp);

        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
        return status;
}
4844
/*
 * ->detect_ctx hook: report the connector status.
 *
 * Skips the expensive full probe when a previous (short pulse) path
 * already performed it (detect_done). May return -EDEADLK, in which
 * case the caller backs off and retries with the same acquire ctx.
 */
static int
intel_dp_detect(struct drm_connector *connector,
                struct drm_modeset_acquire_ctx *ctx,
                bool force)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        /* Default to the last known status if the full detect is skipped. */
        int status = connector->status;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);

        /* If full detect is not performed yet, do a full detect */
        if (!intel_dp->detect_done) {
                struct drm_crtc *crtc;
                int ret;

                /* Lock the crtc too; intel_dp_long_pulse() may touch the link. */
                crtc = connector->state->crtc;
                if (crtc) {
                        ret = drm_modeset_lock(&crtc->mutex, ctx);
                        if (ret)
                                return ret;
                }

                status = intel_dp_long_pulse(intel_dp->attached_connector);
        }

        /* Consume the flag so the next detect does a full probe again. */
        intel_dp->detect_done = false;

        return status;
}
4875
4876 static void
4877 intel_dp_force(struct drm_connector *connector)
4878 {
4879         struct intel_dp *intel_dp = intel_attached_dp(connector);
4880         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4881         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4882
4883         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4884                       connector->base.id, connector->name);
4885         intel_dp_unset_edid(intel_dp);
4886
4887         if (connector->status != connector_status_connected)
4888                 return;
4889
4890         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4891
4892         intel_dp_set_edid(intel_dp);
4893
4894         intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
4895 }
4896
4897 static int intel_dp_get_modes(struct drm_connector *connector)
4898 {
4899         struct intel_connector *intel_connector = to_intel_connector(connector);
4900         struct edid *edid;
4901
4902         edid = intel_connector->detect_edid;
4903         if (edid) {
4904                 int ret = intel_connector_update_modes(connector, edid);
4905                 if (ret)
4906                         return ret;
4907         }
4908
4909         /* if eDP has no EDID, fall back to fixed mode */
4910         if (intel_dp_is_edp(intel_attached_dp(connector)) &&
4911             intel_connector->panel.fixed_mode) {
4912                 struct drm_display_mode *mode;
4913
4914                 mode = drm_mode_duplicate(connector->dev,
4915                                           intel_connector->panel.fixed_mode);
4916                 if (mode) {
4917                         drm_mode_probed_add(connector, mode);
4918                         return 1;
4919                 }
4920         }
4921
4922         return 0;
4923 }
4924
4925 static int
4926 intel_dp_connector_register(struct drm_connector *connector)
4927 {
4928         struct intel_dp *intel_dp = intel_attached_dp(connector);
4929         int ret;
4930
4931         ret = intel_connector_register(connector);
4932         if (ret)
4933                 return ret;
4934
4935         i915_debugfs_connector_add(connector);
4936
4937         DRM_DEBUG_KMS("registering %s bus for %s\n",
4938                       intel_dp->aux.name, connector->kdev->kobj.name);
4939
4940         intel_dp->aux.dev = connector->kdev;
4941         return drm_dp_aux_register(&intel_dp->aux);
4942 }
4943
4944 static void
4945 intel_dp_connector_unregister(struct drm_connector *connector)
4946 {
4947         drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4948         intel_connector_unregister(connector);
4949 }
4950
4951 static void
4952 intel_dp_connector_destroy(struct drm_connector *connector)
4953 {
4954         struct intel_connector *intel_connector = to_intel_connector(connector);
4955
4956         kfree(intel_connector->detect_edid);
4957
4958         if (!IS_ERR_OR_NULL(intel_connector->edid))
4959                 kfree(intel_connector->edid);
4960
4961         /*
4962          * Can't call intel_dp_is_edp() since the encoder may have been
4963          * destroyed already.
4964          */
4965         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4966                 intel_panel_fini(&intel_connector->panel);
4967
4968         drm_connector_cleanup(connector);
4969         kfree(connector);
4970 }
4971
/*
 * ->destroy hook: tear down MST state, eDP VDD/notifier state, the AUX
 * channel, and finally free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_dp *intel_dp = &intel_dig_port->dp;

        intel_dp_mst_encoder_cleanup(intel_dig_port);
        if (intel_dp_is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);

                /* Drop the reboot notifier if one was registered. */
                if (intel_dp->edp_notifier.notifier_call) {
                        unregister_reboot_notifier(&intel_dp->edp_notifier);
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
        }

        intel_dp_aux_fini(intel_dp);

        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
}
4999
5000 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5001 {
5002         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5003
5004         if (!intel_dp_is_edp(intel_dp))
5005                 return;
5006
5007         /*
5008          * vdd might still be enabled do to the delayed vdd off.
5009          * Make sure vdd is actually turned off here.
5010          */
5011         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5012         pps_lock(intel_dp);
5013         edp_panel_vdd_off_sync(intel_dp);
5014         pps_unlock(intel_dp);
5015 }
5016
/*
 * If the BIOS left panel VDD forced on, take over its tracking: grab the
 * power domain reference the on-state implies and schedule the usual
 * delayed VDD off. Must be called with pps_mutex held (asserted below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        /*
         * The VDD bit needs a power domain reference, so if the bit is
         * already enabled when we boot or resume, grab this reference and
         * schedule a vdd off, so we don't hold on to the reference
         * indefinitely.
         */
        DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        edp_panel_vdd_schedule_off(intel_dp);
}
5037
5038 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5039 {
5040         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5041
5042         if ((intel_dp->DP & DP_PORT_EN) == 0)
5043                 return INVALID_PIPE;
5044
5045         if (IS_CHERRYVIEW(dev_priv))
5046                 return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5047         else
5048                 return PORT_TO_PIPE(intel_dp->DP);
5049 }
5050
/*
 * drm encoder ->reset hook: resync software state with whatever the
 * hardware/BIOS left behind (typically at driver load or resume).
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

        /* Non-DDI platforms track the port state in the output register. */
        if (!HAS_DDI(dev_priv))
                intel_dp->DP = I915_READ(intel_dp->output_reg);

        if (lspcon->active)
                lspcon_resume(lspcon);

        /* Make the next detect re-derive the max link rate/lane count. */
        intel_dp->reset_link_params = true;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        if (intel_dp_is_edp(intel_dp)) {
                /* Reinit the power sequencer, in case BIOS did something with it. */
                intel_dp_pps_init(intel_dp);
                intel_edp_panel_vdd_sanitize(intel_dp);
        }

        pps_unlock(intel_dp);
}
5078
/* DP connector vfuncs; detection is done via the helper ->detect_ctx. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .force = intel_dp_force,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
        .late_register = intel_dp_connector_register,
        .early_unregister = intel_dp_connector_unregister,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
5090
/* Connector probe helper vfuncs. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .detect_ctx = intel_dp_detect,
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
        .atomic_check = intel_digital_connector_atomic_check,
};
5097
/* DP encoder vfuncs: state resync on reset, teardown on destroy. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
        .reset = intel_dp_encoder_reset,
        .destroy = intel_dp_encoder_destroy,
};
5102
/*
 * intel_dp_hpd_pulse - handle a short or long HPD pulse on a DP port
 * @intel_dig_port: the digital port that received the pulse
 * @long_hpd: %true for a long pulse, %false for a short one
 *
 * Returns %IRQ_HANDLED when the pulse was fully dealt with here,
 * %IRQ_NONE when the caller should follow up with a full detect
 * (detect_done is cleared in that case).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        enum irqreturn ret = IRQ_NONE;

        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
                 * vdd off can generate a long pulse on eDP which
                 * would require vdd on to handle it, and thus we
                 * would end up in an endless cycle of
                 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
                 */
                DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                              port_name(intel_dig_port->base.port));
                return IRQ_HANDLED;
        }

        DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
                      port_name(intel_dig_port->base.port),
                      long_hpd ? "long" : "short");

        if (long_hpd) {
                /* Hand off to a full detect, with fresh link parameters. */
                intel_dp->reset_link_params = true;
                intel_dp->detect_done = false;
                return IRQ_NONE;
        }

        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        if (intel_dp->is_mst) {
                if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
                        /*
                         * If we were in MST mode, and device is not
                         * there, get out of MST mode
                         */
                        DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
                                      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        intel_dp->is_mst);
                        /* Request a full detect to pick up the sink anew. */
                        intel_dp->detect_done = false;
                        goto put_power;
                }
        }

        if (!intel_dp->is_mst) {
                struct drm_modeset_acquire_ctx ctx;
                struct drm_connector *connector = &intel_dp->attached_connector->base;
                struct drm_crtc *crtc;
                int iret;
                bool handled = false;

                /*
                 * Take connection_mutex and the crtc lock via an acquire
                 * ctx so we can back off and retry on -EDEADLK.
                 */
                drm_modeset_acquire_init(&ctx, 0);
retry:
                iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx);
                if (iret)
                        goto err;

                crtc = connector->state->crtc;
                if (crtc) {
                        iret = drm_modeset_lock(&crtc->mutex, &ctx);
                        if (iret)
                                goto err;
                }

                handled = intel_dp_short_pulse(intel_dp);

err:
                if (iret == -EDEADLK) {
                        drm_modeset_backoff(&ctx);
                        goto retry;
                }

                drm_modeset_drop_locks(&ctx);
                drm_modeset_acquire_fini(&ctx);
                WARN(iret, "Acquiring modeset locks failed with %i\n", iret);

                /* Short pulse couldn't handle it; fall back to full detect. */
                if (!handled) {
                        intel_dp->detect_done = false;
                        goto put_power;
                }
        }

        ret = IRQ_HANDLED;

put_power:
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);

        return ret;
}
5195
5196 /* check the VBT to see whether the eDP is on another port */
5197 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5198 {
5199         /*
5200          * eDP not supported on g4x. so bail out early just
5201          * for a bit extra safety in case the VBT is bonkers.
5202          */
5203         if (INTEL_GEN(dev_priv) < 5)
5204                 return false;
5205
5206         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5207                 return true;
5208
5209         return intel_bios_is_port_edp(dev_priv, port);
5210 }
5211
5212 static void
5213 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5214 {
5215         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5216         enum port port = dp_to_dig_port(intel_dp)->base.port;
5217
5218         if (!IS_G4X(dev_priv) && port != PORT_A)
5219                 intel_attach_force_audio_property(connector);
5220
5221         intel_attach_broadcast_rgb_property(connector);
5222
5223         if (intel_dp_is_edp(intel_dp)) {
5224                 u32 allowed_scalers;
5225
5226                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5227                 if (!HAS_GMCH_DISPLAY(dev_priv))
5228                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5229
5230                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5231
5232                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5233
5234         }
5235 }
5236
5237 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5238 {
5239         intel_dp->panel_power_off_time = ktime_get_boottime();
5240         intel_dp->last_power_on = jiffies;
5241         intel_dp->last_backlight_off = jiffies;
5242 }
5243
/*
 * Read the current panel power sequencer delays out of the hardware
 * registers into @seq (values in the hw's own units; t11_t12 is scaled
 * by 1000 to match the sw table — see intel_dp_init_panel_power_sequencer).
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp_ctl = ironlake_get_pp_control(intel_dp);

        pp_on = I915_READ(regs.pp_on);
        pp_off = I915_READ(regs.pp_off);
        /*
         * On BXT/CNP/ICP the power cycle delay is read from PP_CONTROL
         * below instead of a separate divisor register.
         */
        if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
            !HAS_PCH_ICP(dev_priv)) {
                I915_WRITE(regs.pp_ctrl, pp_ctl);
                pp_div = I915_READ(regs.pp_div);
        }

        /* Pull timing values out of registers */
        seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
                     PANEL_POWER_UP_DELAY_SHIFT;

        seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
                  PANEL_LIGHT_ON_DELAY_SHIFT;

        seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
                  PANEL_LIGHT_OFF_DELAY_SHIFT;

        seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
                   PANEL_POWER_DOWN_DELAY_SHIFT;

        if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
            HAS_PCH_ICP(dev_priv)) {
                seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                                BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
        } else {
                seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
        }
}
5287
/* Dump one set of panel power sequencer delays for debugging. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
        DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
                      state_name,
                      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
5295
5296 static void
5297 intel_pps_verify_state(struct intel_dp *intel_dp)
5298 {
5299         struct edp_power_seq hw;
5300         struct edp_power_seq *sw = &intel_dp->pps_delays;
5301
5302         intel_pps_readout_hw_state(intel_dp, &hw);
5303
5304         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5305             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5306                 DRM_ERROR("PPS state mismatch\n");
5307                 intel_pps_dump_state("sw", sw);
5308                 intel_pps_dump_state("hw", &hw);
5309         }
5310 }
5311
/*
 * Compute the final panel power sequencer delays as the max of the
 * current register values and the VBT, falling back to the eDP spec
 * limits when both are zero. The result is cached in
 * intel_dp->pps_delays (hw units, 100usec) and mirrored into the
 * millisecond-based delay fields used for manual waits.
 * Must be called with pps_mutex held; a nonzero t11_t12 marks the
 * cache as already initialized.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct edp_power_seq cur, vbt, spec,
                *final = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* already initialized? */
        if (final->t11_t12 != 0)
                return;

        intel_pps_readout_hw_state(intel_dp, &cur);

        intel_pps_dump_state("cur", &cur);

        vbt = dev_priv->vbt.edp.pps;
        /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
         * of 500ms appears to be too short. Ocassionally the panel
         * just fails to power back on. Increasing the delay to 800ms
         * seems sufficient to avoid this problem.
         */
        if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
                vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
                DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
                              vbt.t11_t12);
        }
        /* T11_T12 delay is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        vbt.t11_t12 += 100 * 10;

        /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
         * our hw here, which are all in 100usec. */
        spec.t1_t3 = 210 * 10;
        spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
        spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
        spec.t10 = 500 * 10;
        /* This one is special and actually in units of 100ms, but zero
         * based in the hw (so we need to add 100 ms). But the sw vbt
         * table multiplies it with 1000 to make it in units of 100usec,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;

        intel_pps_dump_state("vbt", &vbt);

        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
#define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
                                       spec.field : \
                                       max(cur.field, vbt.field))
        assign_final(t1_t3);
        assign_final(t8);
        assign_final(t9);
        assign_final(t10);
        assign_final(t11_t12);
#undef assign_final

        /* Convert the 100usec hw units to milliseconds for the sw waits. */
#define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
        intel_dp->panel_power_up_delay = get_delay(t1_t3);
        intel_dp->backlight_on_delay = get_delay(t8);
        intel_dp->backlight_off_delay = get_delay(t9);
        intel_dp->panel_power_down_delay = get_delay(t10);
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

        DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
                      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
                      intel_dp->panel_power_cycle_delay);

        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

        /*
         * We override the HW backlight delays to 1 because we do manual waits
         * on them. For T8, even BSpec recommends doing it. For T9, if we
         * don't do this, we'll end up waiting for the backlight off delay
         * twice: once when we do the manual sleep, and once when we disable
         * the panel and wait for the PP_STATUS bit to become zero.
         */
        final->t8 = 1;
        final->t9 = 1;

        /*
         * HW has only a 100msec granularity for t11_t12 so round it up
         * accordingly.
         */
        final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
5403
/*
 * Program the panel power sequencer registers from the previously
 * computed intel_dp->pps_delays. When @force_disable_vdd is set, any
 * BIOS left-over VDD force is cleared before anything else is touched.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = dev_priv->rawclk_freq / 1000;
        struct pps_registers regs;
        enum port port = dp_to_dig_port(intel_dp)->base.port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        intel_pps_get_registers(intel_dp, &regs);

        /*
         * On some VLV machines the BIOS can leave the VDD
         * enabled even on power seqeuencers which aren't
         * hooked up to any port. This would mess up the
         * power domain tracking the first time we pick
         * one of these power sequencers for use since
         * edp_panel_vdd_on() would notice that the VDD was
         * already on and therefore wouldn't grab the power
         * domain reference. Disable VDD first to avoid this.
         * This also avoids spuriously turning the VDD on as
         * soon as the new power seqeuencer gets initialized.
         */
        if (force_disable_vdd) {
                u32 pp = ironlake_get_pp_control(intel_dp);

                WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

                if (pp & EDP_FORCE_VDD)
                        DRM_DEBUG_KMS("VDD already on, disabling first\n");

                pp &= ~EDP_FORCE_VDD;

                I915_WRITE(regs.pp_ctrl, pp);
        }

        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
            HAS_PCH_ICP(dev_priv)) {
                /* The power cycle delay lives in PP_CONTROL on these. */
                pp_div = I915_READ(regs.pp_ctrl);
                pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                                << BXT_POWER_CYCLE_DELAY_SHIFT);
        } else {
                pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
                pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                                << PANEL_POWER_CYCLE_DELAY_SHIFT);
        }

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                if (port == PORT_A)
                        port_sel = PANEL_PORT_SELECT_DPA;
                else
                        port_sel = PANEL_PORT_SELECT_DPD;
        }

        pp_on |= port_sel;

        I915_WRITE(regs.pp_on, pp_on);
        I915_WRITE(regs.pp_off, pp_off);
        if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
            HAS_PCH_ICP(dev_priv))
                I915_WRITE(regs.pp_ctrl, pp_div);
        else
                I915_WRITE(regs.pp_div, pp_div);

        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(regs.pp_on),
                      I915_READ(regs.pp_off),
                      (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)  ||
                       HAS_PCH_ICP(dev_priv)) ?
                      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
                      I915_READ(regs.pp_div));
}
5491
5492 static void intel_dp_pps_init(struct intel_dp *intel_dp)
5493 {
5494         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5495
5496         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5497                 vlv_initial_power_sequencer_setup(intel_dp);
5498         } else {
5499                 intel_dp_init_panel_power_sequencer(intel_dp);
5500                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
5501         }
5502 }
5503
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/*
	 * A request matching the panel's downclock refresh rate selects the
	 * low-RR timings; any other rate keeps the default high-RR timings.
	 * NOTE(review): downclock_mode is assumed non-NULL here - DRRS is
	 * only enabled when intel_dp_drrs_init() found a downclock mode.
	 */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->base.active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		/* Gen8+ (not CHV): switch RR by selecting M1/N1 vs M2/N2 link timings. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		/* Gen7 (and VLV/CHV): toggle the RR mode switch bit in PIPECONF. */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5600
5601 /**
5602  * intel_edp_drrs_enable - init drrs struct if supported
5603  * @intel_dp: DP struct
5604  * @crtc_state: A pointer to the active crtc state.
5605  *
5606  * Initializes frontbuffer_bits and drrs.dp
5607  */
5608 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5609                            const struct intel_crtc_state *crtc_state)
5610 {
5611         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5612
5613         if (!crtc_state->has_drrs) {
5614                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5615                 return;
5616         }
5617
5618         if (dev_priv->psr.enabled) {
5619                 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
5620                 return;
5621         }
5622
5623         mutex_lock(&dev_priv->drrs.mutex);
5624         if (WARN_ON(dev_priv->drrs.dp)) {
5625                 DRM_ERROR("DRRS already enabled\n");
5626                 goto unlock;
5627         }
5628
5629         dev_priv->drrs.busy_frontbuffer_bits = 0;
5630
5631         dev_priv->drrs.dp = intel_dp;
5632
5633 unlock:
5634         mutex_unlock(&dev_priv->drrs.mutex);
5635 }
5636
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* If we are currently downclocked, restore the panel's fixed (high) RR. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Sync-cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex (see intel_edp_drrs_downclock_work), so waiting for it
	 * while holding the lock could deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5666
/*
 * Delayed work that switches the panel down to the low refresh rate once
 * the screen has stayed idle: runs after the delay scheduled by
 * intel_edp_drrs_flush() and only downclocks if no frontbuffer bits are
 * still marked busy.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5698
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* The screen just became busy; drop any pending idleness downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5739
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idleness timer from scratch. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* A flush clears the busy state set by the matching invalidate. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5789
5790 /**
5791  * DOC: Display Refresh Rate Switching (DRRS)
5792  *
5793  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5795  * dynamically, based on the usage scenario. This feature is applicable
5796  * for internal panels.
5797  *
5798  * Indication that the panel supports DRRS is given by the panel EDID, which
5799  * would list multiple refresh rates for one resolution.
5800  *
5801  * DRRS is of 2 types - static and seamless.
5802  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5803  * (may appear as a blink on screen) and is used in dock-undock scenario.
5804  * Seamless DRRS involves changing RR without any visual effect to the user
5805  * and can be used during normal system usage. This is done by programming
5806  * certain registers.
5807  *
5808  * Support for static/seamless DRRS may be indicated in the VBT based on
5809  * inputs from the panel spec.
5810  *
5811  * DRRS saves power by switching to low RR based on usage scenarios.
5812  *
5813  * The implementation is based on frontbuffer tracking implementation.  When
5814  * there is a disturbance on the screen triggered by user activity or a periodic
5815  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5816  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5817  * made.
5818  *
5819  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5820  * and intel_edp_drrs_flush() are called.
5821  *
5822  * DRRS can be further extended to support other internal panels and also
5823  * the scenario of video playback wherein RR is set based on the rate
5824  * requested by userspace.
5825  */
5826
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	/* Work/mutex are initialized even when DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* Seamless RR switching is only implemented for Gen7 and newer. */
	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A lower-refresh variant of fixed_mode is required for DRRS. */
	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
						    &connector->base);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5874
/*
 * eDP-specific half of connector init: set up the panel power sequencer,
 * cache DPCD and EDID, pick the fixed panel mode (EDID preferred, VBT as
 * fallback), init DRRS and the backlight.
 *
 * Returns true on success (and trivially for non-eDP ports), false when the
 * panel looks like a ghost or eDP must not be registered.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *alt_fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(&dev_priv->drm)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	pps_lock(intel_dp);

	intel_dp_init_panel_power_timestamps(intel_dp);
	intel_dp_pps_init(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
		} else {
			/* EDID read but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Stored as an ERR_PTR on failure so later users can distinguish. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available, save an alt mode also */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
		} else if (!alt_fixed_mode) {
			alt_fixed_mode = drm_mode_duplicate(dev, scan);
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
			 downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
}
6000
6001 /* Set up the hotplug pin and aux power domain. */
6002 static void
6003 intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
6004 {
6005         struct intel_encoder *encoder = &intel_dig_port->base;
6006         struct intel_dp *intel_dp = &intel_dig_port->dp;
6007         struct intel_encoder *intel_encoder = &intel_dig_port->base;
6008         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
6009
6010         encoder->hpd_pin = intel_hpd_pin_default(dev_priv, encoder->port);
6011
6012         switch (encoder->port) {
6013         case PORT_A:
6014                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
6015                 break;
6016         case PORT_B:
6017                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
6018                 break;
6019         case PORT_C:
6020                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
6021                 break;
6022         case PORT_D:
6023                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
6024                 break;
6025         case PORT_E:
6026                 /* FIXME: Check VBT for actual wiring of PORT E */
6027                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
6028                 break;
6029         case PORT_F:
6030                 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_F;
6031                 break;
6032         default:
6033                 MISSING_CASE(encoder->port);
6034         }
6035 }
6036
6037 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6038 {
6039         struct intel_connector *intel_connector;
6040         struct drm_connector *connector;
6041
6042         intel_connector = container_of(work, typeof(*intel_connector),
6043                                        modeset_retry_work);
6044         connector = &intel_connector->base;
6045         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
6046                       connector->name);
6047
6048         /* Grab the locks before changing connector property*/
6049         mutex_lock(&connector->dev->mode_config.mutex);
6050         /* Set connector link status to BAD and send a Uevent to notify
6051          * userspace to do a modeset.
6052          */
6053         drm_mode_connector_set_link_status_property(connector,
6054                                                     DRM_MODE_LINK_STATUS_BAD);
6055         mutex_unlock(&connector->dev->mode_config.mutex);
6056         /* Send Hotplug uevent so userspace can reprobe */
6057         drm_kms_helper_hotplug_event(connector->dev);
6058 }
6059
/*
 * Finish DP/eDP connector setup for an already-allocated digital port:
 * install the per-platform AUX/retrain vfuncs, create and wire up the DRM
 * connector, init AUX, MST and (if applicable) the eDP panel.
 *
 * Returns true on success; false leaves the connector cleaned up and the
 * caller responsible for the port/encoder.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_dp_init_connector_port_info(intel_dig_port);

	intel_dp_aux_init(intel_dp);

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Tear down what we set up above before bailing out. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
6187
/*
 * Allocate and register a DP encoder + connector for @port backed by
 * @output_reg, installing the per-platform enable/disable hooks.
 *
 * Returns true on success; on failure all allocations are unwound via the
 * goto-cleanup chain and false is returned.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Pick the enable/disable hook set matching the platform. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = ilk_disable_dp;
		intel_encoder->post_disable = ilk_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On CHV, port D is wired to pipe C only; B/C serve pipes A and B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
6276
6277 void intel_dp_mst_suspend(struct drm_device *dev)
6278 {
6279         struct drm_i915_private *dev_priv = to_i915(dev);
6280         int i;
6281
6282         /* disable MST */
6283         for (i = 0; i < I915_MAX_PORTS; i++) {
6284                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6285
6286                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6287                         continue;
6288
6289                 if (intel_dig_port->dp.is_mst)
6290                         drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6291         }
6292 }
6293
6294 void intel_dp_mst_resume(struct drm_device *dev)
6295 {
6296         struct drm_i915_private *dev_priv = to_i915(dev);
6297         int i;
6298
6299         for (i = 0; i < I915_MAX_PORTS; i++) {
6300                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6301                 int ret;
6302
6303                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6304                         continue;
6305
6306                 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6307                 if (ret)
6308                         intel_dp_check_mst_status(&intel_dig_port->dp);
6309         }
6310 }