drm/i915: Prefer encoder->name over port_name()
drivers/gpu/drm/i915/display/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER      61440

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE                  2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1             400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR              976

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
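
/*
 * Sanity check of the fixed point encoding above: 0x819999a >> 22 == 32 and
 * 0x819999a & GENMASK(21, 0) == 1677722, matching the 162000 entry, while
 * 0x6c00000 >> 22 == 27 with a zero fraction for the 270000 entry.
 */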

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * returns true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
                                      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
        static const int dp_rates[] = {
                162000, 270000, 540000, 810000
        };
        int i, max_rate;

        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
                if (dp_rates[i] > max_rate)
                        break;
                intel_dp->sink_rates[i] = dp_rates[i];
        }

        intel_dp->num_sink_rates = i;
}
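
/*
 * Worked example: a sink reporting DP_MAX_LINK_RATE == 0x14 (HBR2) gives
 * max_rate = 0x14 * 27000 = 540000, so sink_rates becomes
 * { 162000, 270000, 540000 } and num_sink_rates = 3.
 */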

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
        int i;

        /* Limit results by potentially reduced max rate */
        for (i = 0; i < len; i++) {
                if (rates[len - i - 1] <= max_rate)
                        return len - i;
        }

        return 0;
}
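
/*
 * E.g. rates = { 162000, 270000, 540000 } with max_rate = 270000 returns 2:
 * the array is scanned from the top down until a usable rate is found.
 */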

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
                                          int max_rate)
{
        return intel_dp_rate_limit_len(intel_dp->common_rates,
                                       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
        int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

        return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz; divide by 8 for bit-to-byte conversion */
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
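
/*
 * E.g. a 148500 kHz pixel clock at 24 bpp requires
 * DIV_ROUND_UP(148500 * 24, 8) = 445500 kBytes/sec of link bandwidth.
 */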

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
         * link rate that is generally expressed in Gbps. Since 8 bits of data
         * are transmitted every LS_Clk per lane, there is no need to account
         * for the channel encoding that is done in the PHY layer here.
         */

        return max_link_clock * max_lanes;
}
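
/*
 * E.g. HBR2 (LS_Clk = 540000 kHz) over 4 lanes provides
 * 540000 * 4 = 2160000 kBytes/sec, one data byte per LS_Clk per lane.
 */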

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;

        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

        if (type != DP_DS_PORT_TYPE_VGA)
                return max_dotclk;

        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
                                                    intel_dp->downstream_ports);

        if (ds_max_dotclk != 0)
                max_dotclk = min(max_dotclk, ds_max_dotclk);

        return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;

        u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

        /* Low voltage SKUs are limited to a max of 5.4G */
        if (voltage == VOLTAGE_INFO_0_85V)
                return 540000;

        /* For this SKU 8.1G is supported on all ports */
        if (IS_CNL_WITH_PORT_F(dev_priv))
                return 810000;

        /* For other SKUs, the max rate on ports A and D is 5.4G */
        if (port == PORT_A || port == PORT_D)
                return 540000;

        return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_combo(dev_priv, phy) &&
            !IS_ELKHARTLAKE(dev_priv) &&
            !intel_dp_is_edp(intel_dp))
                return 540000;

        return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
        /* The values must be in increasing order */
        static const int cnl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
                162000, 216000, 243000, 270000, 324000, 432000, 540000
        };
        static const int skl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000
        };
        static const int hsw_rates[] = {
                162000, 270000, 540000
        };
        static const int g4x_rates[] = {
                162000, 270000
        };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[dig_port->base.port];
        const int *source_rates;
        int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
        } else {
                source_rates = g4x_rates;
                size = ARRAY_SIZE(g4x_rates);
        }

        if (max_rate && vbt_max_rate)
                max_rate = min(max_rate, vbt_max_rate);
        else if (vbt_max_rate)
                max_rate = vbt_max_rate;

        if (max_rate)
                size = intel_dp_rate_limit_len(source_rates, size, max_rate);

        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}
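
/*
 * Both inputs are sorted in increasing order, so this is a linear,
 * merge-style intersection; e.g. source rates { 162000, 270000, 540000 }
 * and sink rates { 162000, 270000, 540000, 810000 } yield 3 common rates.
 */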

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
        int i;

        for (i = 0; i < len; i++)
                if (rate == rates[i])
                        return i;

        return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
        WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->sink_rates,
                                                     intel_dp->num_sink_rates,
                                                     intel_dp->common_rates);

        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
                                       u8 lane_count)
{
        /*
         * FIXME: we need to synchronize the current link parameters with
         * hardware readout. Currently fast link training doesn't work on
         * boot-up.
         */
        if (link_rate == 0 ||
            link_rate > intel_dp->max_link_rate)
                return false;

        if (lane_count == 0 ||
            lane_count > intel_dp_max_lane_count(intel_dp))
                return false;

        return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
                                                     u8 lane_count)
{
        const struct drm_display_mode *fixed_mode =
                intel_dp->attached_connector->panel.fixed_mode;
        int mode_rate, max_rate;

        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
        max_rate = intel_dp_max_data_rate(link_rate, lane_count);
        if (mode_rate > max_rate)
                return false;

        return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
        int index;

        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
        if (index > 0) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
                intel_dp->max_link_lane_count = lane_count;
        } else if (lane_count > 1) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
                DRM_ERROR("Link Training Unsuccessful\n");
                return -1;
        }

        return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;

        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        /*
         * Output bpp is stored in 6.4 format, so right shift by 4 to get the
         * integer value, since we support only integer values of bpp.
         */
        if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
            drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
                if (intel_dp_is_edp(intel_dp)) {
                        dsc_max_output_bpp =
                                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
                        dsc_slice_count =
                                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        dsc_max_output_bpp =
                                intel_dp_dsc_get_output_bpp(max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
                        dsc_slice_count =
                                intel_dp_dsc_get_slice_count(intel_dp,
                                                             target_clock,
                                                             mode->hdisplay);
                }
        }

        if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
            target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
        int i;
        u32 v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((u32)src[i]) << ((3 - i) * 8);
        return v;
}
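
/*
 * E.g. intel_dp_pack_aux((const u8 []){ 0x11, 0x22 }, 2) returns 0x11220000:
 * bytes are packed MSB first to match the AUX channel data register layout.
 */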

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        /*
         * See intel_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        wakeref = intel_display_power_get(dev_priv,
                                          intel_aux_power_domain(dp_to_dig_port(intel_dp)));

        mutex_lock(&dev_priv->pps_mutex);

        return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        mutex_unlock(&dev_priv->pps_mutex);
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dp_to_dig_port(intel_dp)),
                                wakeref);
        return 0;
}

#define with_pps_lock(dp, wf) \
        for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
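
/*
 * The body of a with_pps_lock() loop runs exactly once: pps_lock() returns a
 * non-zero wakeref and pps_unlock() returns 0, terminating the loop after the
 * first pass.
 */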

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        u32 DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
                 pipe_name(pipe), intel_dig_port->base.base.base.id,
                 intel_dig_port->base.base.name))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
                      pipe_name(pipe), intel_dig_port->base.base.base.id,
                      intel_dig_port->base.base.name);

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev_priv))
                DP |= DP_PIPE_SEL_CHV(pipe);
        else
                DP |= DP_PIPE_SEL(pipe);

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So temporarily enable it if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev_priv) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        DRM_ERROR("Failed to force on pll for pipe %c!\n",
                                  pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even the VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev_priv, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

        /*
         * We don't have a power sequencer currently.
         * Pick one that's not used by another port.
         */
        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                if (encoder->type == INTEL_OUTPUT_EDP) {
                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                                intel_dp->active_pipe != intel_dp->pps_pipe);

                        if (intel_dp->pps_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->pps_pipe);
                } else {
                        WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

                        if (intel_dp->active_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->active_pipe);
                }
        }

        if (pipes == 0)
                return INVALID_PIPE;

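        /* ffs() is 1-based: subtracting 1 maps the lowest free bit to a pipe */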
        return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                intel_dp->active_pipe != intel_dp->pps_pipe);

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        pipe = vlv_find_free_pps(dev_priv);

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipe == INVALID_PIPE))
                pipe = PIPE_A;

        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
                      pipe_name(intel_dp->pps_pipe),
                      intel_dig_port->base.base.base.id,
                      intel_dig_port->base.base.name);

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int backlight_controller = dev_priv->vbt.backlight.controller;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        if (!intel_dp->pps_reset)
                return backlight_controller;

        intel_dp->pps_reset = false;

        /*
         * Only the HW needs to be reprogrammed, the SW state is fixed and
         * has been set up during connector init.
         */
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

        return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Try to find a pipe with this port selected. */
        /* First pick one where the panel is on. */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* Didn't find one? Pick one where vdd is on. */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* Didn't find one? Pick one with just the correct port. */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* Still nothing? Let vlv_power_sequencer_pipe() pick one when needed. */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
                              intel_dig_port->base.base.base.id,
                              intel_dig_port->base.base.name);
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
                      intel_dig_port->base.base.base.id,
                      intel_dig_port->base.base.name,
                      pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    !IS_GEN9_LP(dev_priv)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should always use them.
         */

        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                if (IS_GEN9_LP(dev_priv))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}

struct pps_registers {
        i915_reg_t pp_ctrl;
        i915_reg_t pp_stat;
        i915_reg_t pp_on;
        i915_reg_t pp_off;
        i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_GEN9_LP(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);

        /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
        if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                regs->pp_div = INVALID_MMIO_REG;
        else
                regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
                                                 edp_notifier);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        with_pps_lock(intel_dp, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                        i915_reg_t pp_ctrl_reg, pp_div_reg;
                        u32 pp_div;

                        pp_ctrl_reg = PP_CONTROL(pipe);
                        pp_div_reg  = PP_DIVISOR(pipe);
                        pp_div = I915_READ(pp_div_reg);
                        pp_div &= PP_REFERENCE_DIVIDER_MASK;

                        /* 0x1F write to PP_DIV_REG sets max cycle delay */
                        I915_WRITE(pp_div_reg, pp_div | 0x1F);
                        I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
                        msleep(intel_dp->panel_power_cycle_delay);
                }
        }

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!intel_dp_is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        u32 status;
        bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        done = wait_event_timeout(i915->gmbus_wait_queue, C,
                                  msecs_to_jiffies_timeout(10));

        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

        return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based on the hrawclk, and we'd like it to run
         * at 2MHz. So take the hrawclk value, divide by 2000, and use that.
         */
        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based on the cdclk or PCH rawclk, and we'd
         * like it to run at 2MHz. So take the cdclk or PCH rawclk value,
         * divide by 2000, and use that.
         */
        if (dig_port->aux_ch == AUX_CH_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        }

        return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        u32 precharge, timeout;

        if (IS_GEN(dev_priv, 6))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev_priv))
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 unused)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 ret;

        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
              DP_AUX_CH_CTL_INTERRUPT |
              DP_AUX_CH_CTL_TIME_OUT_ERROR |
              DP_AUX_CH_CTL_TIME_OUT_MAX |
              DP_AUX_CH_CTL_RECEIVE_ERROR |
              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

        if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
                ret |= DP_AUX_CH_CTL_TBT_IO;

        return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
                  const u8 *send, int send_bytes,
                  u8 *recv, int recv_size,
                  u32 aux_send_ctl_flags)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
                        to_i915(intel_dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
        bool is_tc_port = intel_phy_is_tc(i915, phy);
        i915_reg_t ch_ctl, ch_data[5];
        u32 aux_clock_divider;
        enum intel_display_power_domain aux_domain =
                intel_aux_power_domain(intel_dig_port);
        intel_wakeref_t aux_wakeref;
        intel_wakeref_t pps_wakeref;
        int i, ret, recv_bytes;
        int try, clock = 0;
        u32 status;
        bool vdd;

        ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        for (i = 0; i < ARRAY_SIZE(ch_data); i++)
                ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

        if (is_tc_port)
                intel_tc_port_lock(intel_dig_port);

        aux_wakeref = intel_display_power_get(i915, aux_domain);
        pps_wakeref = pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /*
         * DP AUX is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&i915->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = intel_uncore_read_notrace(uncore, ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }
        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = intel_uncore_read(uncore, ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          send_bytes,
                                                          aux_clock_divider);

                send_ctl |= aux_send_ctl_flags;

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                intel_uncore_write(uncore,
                                                   ch_data[i >> 2],
                                                   intel_dp_pack_aux(send + i,
                                                                     send_bytes - i));

                        /* Send the command and wait for it to complete */
                        intel_uncore_write(uncore, ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp);

                        /* Clear done status and any errors */
                        intel_uncore_write(uncore,
                                           ch_ctl,
                                           status |
                                           DP_AUX_CH_CTL_DONE |
                                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /*
         * Check for timeout or receive error.
         * Timeouts occur when the sink is not connected.
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /*
         * Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these.
         */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea what happened, so we return -EBUSY so that the
         * drm layer takes care of the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp, pps_wakeref);
        intel_display_power_put_async(i915, aux_domain, aux_wakeref);

        if (is_tc_port)
                intel_tc_port_unlock(intel_dig_port);

        return ret;
}

#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
                    const struct drm_dp_aux_msg *msg)
{
        txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;
}
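
/*
 * E.g. a native AUX read (DP_AUX_NATIVE_READ == 0x9) of one byte from DPCD
 * address 0x000 yields the header { 0x90, 0x00, 0x00, 0x00 }; the last byte
 * encodes msg->size - 1.
 */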
1390
1391 static ssize_t
1392 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1393 {
1394         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1395         u8 txbuf[20], rxbuf[20];
1396         size_t txsize, rxsize;
1397         int ret;
1398
1399         intel_dp_aux_header(txbuf, msg);
1400
1401         switch (msg->request & ~DP_AUX_I2C_MOT) {
1402         case DP_AUX_NATIVE_WRITE:
1403         case DP_AUX_I2C_WRITE:
1404         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1405                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1406                 rxsize = 2; /* 0 or 1 data bytes */
1407
1408                 if (WARN_ON(txsize > 20))
1409                         return -E2BIG;
1410
1411                 WARN_ON(!msg->buffer != !msg->size);
1412
1413                 if (msg->buffer)
1414                         memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1415
1416                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1417                                         rxbuf, rxsize, 0);
1418                 if (ret > 0) {
1419                         msg->reply = rxbuf[0] >> 4;
1420
1421                         if (ret > 1) {
1422                                 /* Number of bytes written in a short write. */
1423                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1424                         } else {
1425                                 /* Return payload size. */
1426                                 ret = msg->size;
1427                         }
1428                 }
1429                 break;
1430
1431         case DP_AUX_NATIVE_READ:
1432         case DP_AUX_I2C_READ:
1433                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1434                 rxsize = msg->size + 1;
1435
1436                 if (WARN_ON(rxsize > 20))
1437                         return -E2BIG;
1438
1439                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1440                                         rxbuf, rxsize, 0);
1441                 if (ret > 0) {
1442                         msg->reply = rxbuf[0] >> 4;
1443                         /*
1444                          * Assume happy day, and copy the data. The caller is
1445                          * expected to check msg->reply before touching it.
1446                          *
1447                          * Return payload size.
1448                          */
1449                         ret--;
1450                         memcpy(msg->buffer, rxbuf + 1, ret);
1451                 }
1452                 break;
1453
1454         default:
1455                 ret = -EINVAL;
1456                 break;
1457         }
1458
1459         return ret;
1460 }
1461
1462
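
/*
 * Per-platform AUX channel register lookup: g4x exposes AUX channels B-D
 * only, ILK-style parts split channel A (CPU) from B-D (PCH), and SKL+
 * exposes channels A-F on the CPU die. The MISSING_CASE defaults return a
 * safe register so a bogus VBT AUX channel assignment can't send us to a
 * nonexistent MMIO offset.
 */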
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
                return DP_AUX_CH_CTL(aux_ch);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_CTL(AUX_CH_B);
        }
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
                return DP_AUX_CH_DATA(aux_ch, index);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_DATA(AUX_CH_B, index);
        }
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_A:
                return DP_AUX_CH_CTL(aux_ch);
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
                return PCH_DP_AUX_CH_CTL(aux_ch);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_CTL(AUX_CH_A);
        }
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_A:
                return DP_AUX_CH_DATA(aux_ch, index);
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
                return PCH_DP_AUX_CH_DATA(aux_ch, index);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_DATA(AUX_CH_A, index);
        }
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_A:
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
        case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_CTL(aux_ch);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_CTL(AUX_CH_A);
        }
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum aux_ch aux_ch = dig_port->aux_ch;

        switch (aux_ch) {
        case AUX_CH_A:
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
        case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_DATA(aux_ch, index);
        default:
                MISSING_CASE(aux_ch);
                return DP_AUX_CH_DATA(AUX_CH_A, index);
        }
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
        kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &dig_port->base;

        if (INTEL_GEN(dev_priv) >= 9) {
                intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
                intel_dp->aux_ch_data_reg = skl_aux_data_reg;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
                intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
        } else {
                intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
                intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
        }

        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev_priv))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

        if (INTEL_GEN(dev_priv) >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

        drm_dp_aux_init(&intel_dp->aux);

        /* Failure to allocate our preferred name is not critical */
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
                                       port_name(encoder->port));
        intel_dp->aux.transfer = intel_dp_aux_transfer;
}
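
/*
 * source_rates[] is kept sorted in ascending order, so the last entry is
 * the highest link rate the source can drive: 540000 kHz is the HBR2
 * (5.4 GT/s) link clock, 810000 kHz is HBR3 (8.1 GT/s).
 */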
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
        int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

        return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
        int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

        return max_rate >= 810000;
}
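
/*
 * On pre-DDI platforms the DP PLL dividers are fixed per link rate: pick
 * the divisor set matching the computed port clock from the platform's
 * table (g4x/pch/chv/vlv) and mark the dpll state as already computed.
 */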
static void
intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        const struct dp_link_dpll *divisor = NULL;
        int i, count = 0;

        if (IS_G4X(dev_priv)) {
                divisor = g4x_dpll;
                count = ARRAY_SIZE(g4x_dpll);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
        } else if (IS_CHERRYVIEW(dev_priv)) {
                divisor = chv_dpll;
                count = ARRAY_SIZE(chv_dpll);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                divisor = vlv_dpll;
                count = ARRAY_SIZE(vlv_dpll);
        }

        if (divisor && count) {
                for (i = 0; i < count; i++) {
                        if (pipe_config->port_clock == divisor[i].clock) {
                                pipe_config->dpll = divisor[i].dpll;
                                pipe_config->clock_set = true;
                                break;
                        }
                }
        }
}

static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

                if (r >= len)
                        return;
                str += r;
                len -= r;
        }
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
        char str[128]; /* FIXME: too big for stack? */

        if ((drm_debug & DRM_UT_KMS) == 0)
                return;

        snprintf_int_array(str, sizeof(str),
                           intel_dp->source_rates, intel_dp->num_source_rates);
        DRM_DEBUG_KMS("source rates: %s\n", str);

        snprintf_int_array(str, sizeof(str),
                           intel_dp->sink_rates, intel_dp->num_sink_rates);
        DRM_DEBUG_KMS("sink rates: %s\n", str);

        snprintf_int_array(str, sizeof(str),
                           intel_dp->common_rates, intel_dp->num_common_rates);
        DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
        int len;

        len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
        if (WARN_ON(len <= 0))
                return 162000;

        return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
        int i = intel_dp_rate_index(intel_dp->sink_rates,
                                    intel_dp->num_sink_rates, rate);

        if (WARN_ON(i < 0))
                i = 0;

        return i;
}
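
/*
 * There are two ways to tell the sink which rate to run at: classic DP
 * writes a link bandwidth code to DP_LINK_BW_SET, while eDP 1.4+ sinks
 * that expose a rate table in DP_SUPPORTED_LINK_RATES are instead given
 * an index into that table via DP_LINK_RATE_SET. Exactly one of
 * *link_bw / *rate_select is meaningful; the other is written as 0.
 */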
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           u8 *link_bw, u8 *rate_select)
{
        /* eDP 1.4 rate select method. */
        if (intel_dp->use_rate_select) {
                *link_bw = 0;
                *rate_select =
                        intel_dp_rate_select(intel_dp, port_clock);
        } else {
                *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
                *rate_select = 0;
        }
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
                                         const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        /* On TGL, FEC is supported on all Pipes */
        if (INTEL_GEN(dev_priv) >= 12)
                return true;

        if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
                return true;

        return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *pipe_config)
{
        return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
                drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
                                         const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        /* On TGL, DSC is supported on all Pipes */
        if (INTEL_GEN(dev_priv) >= 12)
                return true;

        if (INTEL_GEN(dev_priv) >= 10 &&
            pipe_config->cpu_transcoder != TRANSCODER_A)
                return true;

        return false;
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *pipe_config)
{
        if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
                return false;

        return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
                drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int bpp, bpc;

        bpp = pipe_config->pipe_bpp;
        bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

        if (bpc > 0)
                bpp = min(bpp, 3*bpc);

        if (intel_dp_is_edp(intel_dp)) {
                /* Get bpp from VBT only for panels that don't have bpp in EDID */
                if (intel_connector->base.display_info.bpc == 0 &&
                    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
                        DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                                      dev_priv->vbt.edp.bpp);
                        bpp = dev_priv->vbt.edp.bpp;
                }
        }

        return bpp;
}

/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
                                  struct link_config_limits *limits)
{
        /* For DP Compliance we override the computed bpp for the pipe */
        if (intel_dp->compliance.test_data.bpc != 0) {
                int bpp = 3 * intel_dp->compliance.test_data.bpc;

                limits->min_bpp = limits->max_bpp = bpp;
                pipe_config->dither_force_disable = bpp == 6 * 3;

                DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
        }

        /* Use values requested by Compliance Test Request */
        if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
                int index;

                /* Validate the compliance test data since max values
                 * might have changed due to link train fallback.
                 */
                if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
                                               intel_dp->compliance.test_lane_count)) {
                        index = intel_dp_rate_index(intel_dp->common_rates,
                                                    intel_dp->num_common_rates,
                                                    intel_dp->compliance.test_link_rate);
                        if (index >= 0)
                                limits->min_clock = limits->max_clock = index;
                        limits->min_lane_count = limits->max_lane_count =
                                intel_dp->compliance.test_lane_count;
                }
        }
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
        /*
         * The bpp value was computed assuming RGB output. With YCbCr 4:2:0
         * output the number of bits per pixel on the link is half that of
         * an RGB pixel.
         */
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                bpp /= 2;

        return bpp;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
                                  const struct link_config_limits *limits)
{
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int bpp, clock, lane_count;
        int mode_rate, link_clock, link_avail;

        for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
                int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   output_bpp);

                for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
                        for (lane_count = limits->min_lane_count;
                             lane_count <= limits->max_lane_count;
                             lane_count <<= 1) {
                                link_clock = intel_dp->common_rates[clock];
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        pipe_config->lane_count = lane_count;
                                        pipe_config->pipe_bpp = bpp;
                                        pipe_config->port_clock = link_clock;

                                        return 0;
                                }
                        }
                }
        }

        return -EINVAL;
}
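
/*
 * dsc_bpc[] is filled by the DRM helper in decreasing order (12, 10, 8),
 * so the first entry not exceeding dsc_max_bpc is the deepest input bpc
 * both ends can do; return it as bpp (3 components). 0 means no match.
 */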
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
        int i, num_bpc;
        u8 dsc_bpc[3] = {0};

        num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
                                                       dsc_bpc);
        for (i = 0; i < num_bpc; i++) {
                if (dsc_max_bpc >= dsc_bpc[i])
                        return dsc_bpc[i] * 3;
        }

        return 0;
}

static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
                                       struct intel_crtc_state *pipe_config,
                                       struct drm_connector_state *conn_state,
                                       struct link_config_limits *limits)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        u8 dsc_max_bpc;
        int pipe_bpp;
        int ret;

        pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
                intel_dp_supports_fec(intel_dp, pipe_config);

        if (!intel_dp_supports_dsc(intel_dp, pipe_config))
                return -EINVAL;

        /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
        if (INTEL_GEN(dev_priv) >= 12)
                dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
        else
                dsc_max_bpc = min_t(u8, 10,
                                    conn_state->max_requested_bpc);

        pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

        /* Min Input BPC for ICL+ is 8 */
        if (pipe_bpp < 8 * 3) {
                DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
                return -EINVAL;
        }

        /*
         * For now enable DSC for max bpp, max link rate, max lane count.
         * Optimize this later for the minimum possible link rate/lane count
         * with DSC enabled for the requested mode.
         */
        pipe_config->pipe_bpp = pipe_bpp;
        pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
        pipe_config->lane_count = limits->max_lane_count;

        if (intel_dp_is_edp(intel_dp)) {
                pipe_config->dsc_params.compressed_bpp =
                        min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
                              pipe_config->pipe_bpp);
                pipe_config->dsc_params.slice_count =
                        drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                        true);
        } else {
                u16 dsc_max_output_bpp;
                u8 dsc_dp_slice_count;

                dsc_max_output_bpp =
                        intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
                                                    pipe_config->lane_count,
                                                    adjusted_mode->crtc_clock,
                                                    adjusted_mode->crtc_hdisplay);
                dsc_dp_slice_count =
                        intel_dp_dsc_get_slice_count(intel_dp,
                                                     adjusted_mode->crtc_clock,
                                                     adjusted_mode->crtc_hdisplay);
                if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
                        DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
                        return -EINVAL;
                }
                pipe_config->dsc_params.compressed_bpp = min_t(u16,
                                                               dsc_max_output_bpp >> 4,
                                                               pipe_config->pipe_bpp);
                pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
        }
        /*
         * The VDSC engine operates at 1 pixel per clock, so if the peak
         * pixel rate is greater than the maximum cdclk and the stream can
         * be split into more than one slice, we need to use 2 VDSC
         * instances.
         */
        if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
                if (pipe_config->dsc_params.slice_count > 1) {
                        pipe_config->dsc_params.dsc_split = true;
                } else {
                        DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
                        return -EINVAL;
                }
        }

        ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
        if (ret < 0) {
                DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
                              "Compressed BPP = %d\n",
                              pipe_config->pipe_bpp,
                              pipe_config->dsc_params.compressed_bpp);
                return ret;
        }

        pipe_config->dsc_params.compression_enable = true;
        DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
                      "Compressed Bpp = %d Slice Count = %d\n",
                      pipe_config->pipe_bpp,
                      pipe_config->dsc_params.compressed_bpp,
                      pipe_config->dsc_params.slice_count);

        return 0;
}
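
/*
 * 6 bpc (18 bpp, with dithering) is only a legal minimum for RGB output;
 * the YCbCr output formats require at least 8 bpc per the DP spec.
 */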
int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
                return 6 * 3;
        else
                return 8 * 3;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state)
{
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct link_config_limits limits;
        int common_len;
        int ret;

        common_len = intel_dp_common_len_rate_limit(intel_dp,
                                                    intel_dp->max_link_rate);

        /* No common link rates between source and sink */
        WARN_ON(common_len <= 0);

        limits.min_clock = 0;
        limits.max_clock = common_len - 1;

        limits.min_lane_count = 1;
        limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

        limits.min_bpp = intel_dp_min_bpp(pipe_config);
        limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

        if (intel_dp_is_edp(intel_dp)) {
                /*
                 * Use the maximum clock and number of lanes the eDP panel
                 * advertises being capable of. The panels are generally
                 * designed to support only a single clock and lane
                 * configuration, and typically these values correspond to the
                 * native resolution of the panel.
                 */
                limits.min_lane_count = limits.max_lane_count;
                limits.min_clock = limits.max_clock;
        }

        intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max rate %d max bpp %d pixel clock %iKHz\n",
                      limits.max_lane_count,
                      intel_dp->common_rates[limits.max_clock],
                      limits.max_bpp, adjusted_mode->crtc_clock);

        /*
         * Optimize for slow and wide. This is the place to add alternative
         * optimization policy.
         */
        ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

        /* enable compression if the mode doesn't fit available BW */
        DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
        if (ret || intel_dp->force_dsc_en) {
                ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
                                                  conn_state, &limits);
                if (ret < 0)
                        return ret;
        }

        if (pipe_config->dsc_params.compression_enable) {
                DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
                              pipe_config->lane_count, pipe_config->port_clock,
                              pipe_config->pipe_bpp,
                              pipe_config->dsc_params.compressed_bpp);

                DRM_DEBUG_KMS("DP link rate required %i available %i\n",
                              intel_dp_link_required(adjusted_mode->crtc_clock,
                                                     pipe_config->dsc_params.compressed_bpp),
                              intel_dp_max_data_rate(pipe_config->port_clock,
                                                     pipe_config->lane_count));
        } else {
                DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
                              pipe_config->lane_count, pipe_config->port_clock,
                              pipe_config->pipe_bpp);

                DRM_DEBUG_KMS("DP link rate required %i available %i\n",
                              intel_dp_link_required(adjusted_mode->crtc_clock,
                                                     pipe_config->pipe_bpp),
                              intel_dp_max_data_rate(pipe_config->port_clock,
                                                     pipe_config->lane_count));
        }
        return 0;
}
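
/*
 * Force YCbCr 4:2:0 output when the mode is 4:2:0-only in the sink's
 * EDID, the sink handles the colorimetry (VSC SDP) and the connector
 * allows it; the conversion needs a pipe scaler, so fail if none can be
 * allocated.
 */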
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
                         struct drm_connector *connector,
                         struct intel_crtc_state *crtc_state)
{
        const struct drm_display_info *info = &connector->display_info;
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        int ret;

        if (!drm_mode_is_420_only(info, adjusted_mode) ||
            !intel_dp_get_colorimetry_status(intel_dp) ||
            !connector->ycbcr_420_allowed)
                return 0;

        crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

        /* YCBCR 420 output conversion needs a scaler */
        ret = skl_update_scaler_crtc(crtc_state);
        if (ret) {
                DRM_DEBUG_KMS("Scaler allocation for output failed\n");
                return ret;
        }

        intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

        return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
{
        const struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;

        if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                return crtc_state->pipe_bpp != 18 &&
                        drm_default_rgb_quant_range(adjusted_mode) ==
                        HDMI_QUANTIZATION_RANGE_LIMITED;
        } else {
                return intel_conn_state->broadcast_rgb ==
                        INTEL_BROADCAST_RGB_LIMITED;
        }
}

int
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
        enum port port = encoder->port;
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
        bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_CONSTANT_N);
        int ret = 0, output_bpp;

        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        if (lspcon->active)
                lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
        else
                ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
                                               pipe_config);

        if (ret)
                return ret;

        pipe_config->has_drrs = false;
        if (IS_G4X(dev_priv) || port == PORT_A)
                pipe_config->has_audio = false;
        else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
                pipe_config->has_audio = intel_dp->has_audio;
        else
                pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

        if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);

                if (INTEL_GEN(dev_priv) >= 9) {
                        ret = skl_update_scaler_crtc(pipe_config);
                        if (ret)
                                return ret;
                }

                if (HAS_GMCH(dev_priv))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 conn_state->scaling_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                conn_state->scaling_mode);
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return -EINVAL;

        if (HAS_GMCH(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return -EINVAL;

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return -EINVAL;

        ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
        if (ret < 0)
                return ret;

        pipe_config->limited_color_range =
                intel_dp_limited_color_range(pipe_config, conn_state);

        if (pipe_config->dsc_params.compression_enable)
                output_bpp = pipe_config->dsc_params.compressed_bpp;
        else
                output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

        intel_link_compute_m_n(output_bpp,
                               pipe_config->lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n,
                               constant_n);

        if (intel_connector->panel.downclock_mode != NULL &&
            dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
                pipe_config->has_drrs = true;
                intel_link_compute_m_n(output_bpp,
                                       pipe_config->lane_count,
                                       intel_connector->panel.downclock_mode->clock,
                                       pipe_config->port_clock,
                                       &pipe_config->dp_m2_n2,
                                       constant_n);
        }

        if (!HAS_DDI(dev_priv))
                intel_dp_set_clock(encoder, pipe_config);

        intel_psr_compute_config(intel_dp, pipe_config);

        intel_hdcp_transcoder_config(intel_connector,
                                     pipe_config->cpu_transcoder);

        return 0;
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
                              int link_rate, u8 lane_count,
                              bool link_mst)
{
        intel_dp->link_trained = false;
        intel_dp->link_rate = link_rate;
        intel_dp->lane_count = lane_count;
        intel_dp->link_mst = link_mst;
}

static void intel_dp_prepare(struct intel_encoder *encoder,
                             const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
                                 pipe_config->lane_count,
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev_priv))
                        intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
                else
                        intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
        }
}
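
/*
 * Panel power sequencer (PPS) state checks: each MASK/VALUE pair below
 * is matched against PP_STATUS to decide when the sequencer has settled
 * into the on, off or power-cycle idle state.
 */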
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
                              u32 mask, u32 value)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        intel_pps_verify_state(intel_dp);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                      mask, value,
                      I915_READ(pp_stat_reg),
                      I915_READ(pp_ctrl_reg));

        if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
                                       mask, value, 5000))
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                          I915_READ(pp_stat_reg),
                          I915_READ(pp_ctrl_reg));

        DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;

        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* take the difference of current time and panel power off time
         * and then make panel wait for t11_t12 if needed. */
        panel_power_on_time = ktime_get_boottime();
        panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
                wait_remaining_ms_from_jiffies(jiffies,
                                       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
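
/*
 * The panel requires a minimum delay between power-on and backlight-on
 * (and between backlight-off and the next power-off); enforce those
 * delays relative to the recorded timestamps.
 */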
2442 static void wait_backlight_on(struct intel_dp *intel_dp)
2443 {
2444         wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2445                                        intel_dp->backlight_on_delay);
2446 }
2447
2448 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2449 {
2450         wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2451                                        intel_dp->backlight_off_delay);
2452 }
2453
2454 /* Read the current pp_control value, unlocking the register if it
2455  * is locked
2456  */
2457
2458 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2459 {
2460         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2461         u32 control;
2462
2463         lockdep_assert_held(&dev_priv->pps_mutex);
2464
2465         control = I915_READ(_pp_ctrl_reg(intel_dp));
2466         if (WARN_ON(!HAS_DDI(dev_priv) &&
2467                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2468                 control &= ~PANEL_UNLOCK_MASK;
2469                 control |= PANEL_UNLOCK_REGS;
2470         }
2471         return control;
2472 }
2473
2474 /*
2475  * Must be paired with edp_panel_vdd_off().
2476  * Must hold pps_mutex around the whole on/off sequence.
2477  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2478  */
2479 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2480 {
2481         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2482         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2483         u32 pp;
2484         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2485         bool need_to_disable = !intel_dp->want_panel_vdd;
2486
2487         lockdep_assert_held(&dev_priv->pps_mutex);
2488
2489         if (!intel_dp_is_edp(intel_dp))
2490                 return false;
2491
2492         cancel_delayed_work(&intel_dp->panel_vdd_work);
2493         intel_dp->want_panel_vdd = true;
2494
2495         if (edp_have_panel_vdd(intel_dp))
2496                 return need_to_disable;
2497
2498         intel_display_power_get(dev_priv,
2499                                 intel_aux_power_domain(intel_dig_port));
2500
2501         DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
2502                       intel_dig_port->base.base.base.id,
2503                       intel_dig_port->base.base.name);
2504
2505         if (!edp_have_panel_power(intel_dp))
2506                 wait_panel_power_cycle(intel_dp);
2507
2508         pp = ironlake_get_pp_control(intel_dp);
2509         pp |= EDP_FORCE_VDD;
2510
2511         pp_stat_reg = _pp_stat_reg(intel_dp);
2512         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2513
2514         I915_WRITE(pp_ctrl_reg, pp);
2515         POSTING_READ(pp_ctrl_reg);
2516         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2517                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2518         /*
2519          * If the panel wasn't on, delay before accessing aux channel
2520          */
2521         if (!edp_have_panel_power(intel_dp)) {
2522                 DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
2523                               intel_dig_port->base.base.base.id,
2524                               intel_dig_port->base.base.name);
2525                 msleep(intel_dp->panel_power_up_delay);
2526         }
2527
2528         return need_to_disable;
2529 }
2530
2531 /*
2532  * Must be paired with intel_edp_panel_vdd_off() or
2533  * intel_edp_panel_off().
2534  * Nested calls to these functions are not allowed since
2535  * we drop the lock. Caller must use some higher level
2536  * locking to prevent nested calls from other threads.
2537  */
2538 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2539 {
2540         intel_wakeref_t wakeref;
2541         bool vdd;
2542
2543         if (!intel_dp_is_edp(intel_dp))
2544                 return;
2545
2546         vdd = false;
2547         with_pps_lock(intel_dp, wakeref)
2548                 vdd = edp_panel_vdd_on(intel_dp);
2549         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
2550                         dp_to_dig_port(intel_dp)->base.base.base.id,
2551                         dp_to_dig_port(intel_dp)->base.base.name);
2552 }
2553
2554 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2555 {
2556         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2557         struct intel_digital_port *intel_dig_port =
2558                 dp_to_dig_port(intel_dp);
2559         u32 pp;
2560         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2561
2562         lockdep_assert_held(&dev_priv->pps_mutex);
2563
2564         WARN_ON(intel_dp->want_panel_vdd);
2565
2566         if (!edp_have_panel_vdd(intel_dp))
2567                 return;
2568
2569         DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
2570                       intel_dig_port->base.base.base.id,
2571                       intel_dig_port->base.base.name);
2572
2573         pp = ironlake_get_pp_control(intel_dp);
2574         pp &= ~EDP_FORCE_VDD;
2575
2576         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2577         pp_stat_reg = _pp_stat_reg(intel_dp);
2578
2579         I915_WRITE(pp_ctrl_reg, pp);
2580         POSTING_READ(pp_ctrl_reg);
2581
2582         /* Make sure sequencer is idle before allowing subsequent activity */
2583         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2584         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2585
2586         if ((pp & PANEL_POWER_ON) == 0)
2587                 intel_dp->panel_power_off_time = ktime_get_boottime();
2588
2589         intel_display_power_put_unchecked(dev_priv,
2590                                           intel_aux_power_domain(intel_dig_port));
2591 }
2592
2593 static void edp_panel_vdd_work(struct work_struct *__work)
2594 {
2595         struct intel_dp *intel_dp =
2596                 container_of(to_delayed_work(__work),
2597                              struct intel_dp, panel_vdd_work);
2598         intel_wakeref_t wakeref;
2599
2600         with_pps_lock(intel_dp, wakeref) {
2601                 if (!intel_dp->want_panel_vdd)
2602                         edp_panel_vdd_off_sync(intel_dp);
2603         }
2604 }
2605
2606 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2607 {
2608         unsigned long delay;
2609
2610         /*
2611          * Queue the timer to fire a long time from now (relative to the power
2612          * down delay) to keep the panel power up across a sequence of
2613          * operations.
2614          */
2615         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2616         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2617 }
2618
2619 /*
2620  * Must be paired with edp_panel_vdd_on().
2621  * Must hold pps_mutex around the whole on/off sequence.
2622  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2623  */
2624 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2625 {
2626         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2627
2628         lockdep_assert_held(&dev_priv->pps_mutex);
2629
2630         if (!intel_dp_is_edp(intel_dp))
2631                 return;
2632
2633         I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
2634                         dp_to_dig_port(intel_dp)->base.base.base.id,
2635                         dp_to_dig_port(intel_dp)->base.base.name);
2636
2637         intel_dp->want_panel_vdd = false;
2638
2639         if (sync)
2640                 edp_panel_vdd_off_sync(intel_dp);
2641         else
2642                 edp_panel_vdd_schedule_off(intel_dp);
2643 }
2644
2645 static void edp_panel_on(struct intel_dp *intel_dp)
2646 {
2647         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2648         u32 pp;
2649         i915_reg_t pp_ctrl_reg;
2650
2651         lockdep_assert_held(&dev_priv->pps_mutex);
2652
2653         if (!intel_dp_is_edp(intel_dp))
2654                 return;
2655
2656         DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
2657                       dp_to_dig_port(intel_dp)->base.base.base.id,
2658                       dp_to_dig_port(intel_dp)->base.base.name);
2659
2660         if (WARN(edp_have_panel_power(intel_dp),
2661                  "[ENCODER:%d:%s] panel power already on\n",
2662                  dp_to_dig_port(intel_dp)->base.base.base.id,
2663                  dp_to_dig_port(intel_dp)->base.base.name))
2664                 return;
2665
2666         wait_panel_power_cycle(intel_dp);
2667
2668         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2669         pp = ironlake_get_pp_control(intel_dp);
2670         if (IS_GEN(dev_priv, 5)) {
2671                 /* ILK workaround: disable reset around power sequence */
2672                 pp &= ~PANEL_POWER_RESET;
2673                 I915_WRITE(pp_ctrl_reg, pp);
2674                 POSTING_READ(pp_ctrl_reg);
2675         }
2676
2677         pp |= PANEL_POWER_ON;
2678         if (!IS_GEN(dev_priv, 5))
2679                 pp |= PANEL_POWER_RESET;
2680
2681         I915_WRITE(pp_ctrl_reg, pp);
2682         POSTING_READ(pp_ctrl_reg);
2683
2684         wait_panel_on(intel_dp);
2685         intel_dp->last_power_on = jiffies;
2686
2687         if (IS_GEN(dev_priv, 5)) {
2688                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2689                 I915_WRITE(pp_ctrl_reg, pp);
2690                 POSTING_READ(pp_ctrl_reg);
2691         }
2692 }
2693
2694 void intel_edp_panel_on(struct intel_dp *intel_dp)
2695 {
2696         intel_wakeref_t wakeref;
2697
2698         if (!intel_dp_is_edp(intel_dp))
2699                 return;
2700
2701         with_pps_lock(intel_dp, wakeref)
2702                 edp_panel_on(intel_dp);
2703 }
2704
2705
2706 static void edp_panel_off(struct intel_dp *intel_dp)
2707 {
2708         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2709         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2710         u32 pp;
2711         i915_reg_t pp_ctrl_reg;
2712
2713         lockdep_assert_held(&dev_priv->pps_mutex);
2714
2715         if (!intel_dp_is_edp(intel_dp))
2716                 return;
2717
2718         DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
2719                       dig_port->base.base.base.id, dig_port->base.base.name);
2720
2721         WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
2722              dig_port->base.base.base.id, dig_port->base.base.name);
2723
2724         pp = ironlake_get_pp_control(intel_dp);
2725         /* We need to switch off panel power _and_ force vdd, for otherwise some
2726          * panels get very unhappy and cease to work. */
2727         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2728                 EDP_BLC_ENABLE);
2729
2730         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2731
2732         intel_dp->want_panel_vdd = false;
2733
2734         I915_WRITE(pp_ctrl_reg, pp);
2735         POSTING_READ(pp_ctrl_reg);
2736
2737         wait_panel_off(intel_dp);
2738         intel_dp->panel_power_off_time = ktime_get_boottime();
2739
2740         /* We got a reference when we enabled the VDD. */
2741         intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2742 }
2743
2744 void intel_edp_panel_off(struct intel_dp *intel_dp)
2745 {
2746         intel_wakeref_t wakeref;
2747
2748         if (!intel_dp_is_edp(intel_dp))
2749                 return;
2750
2751         with_pps_lock(intel_dp, wakeref)
2752                 edp_panel_off(intel_dp);
2753 }
2754
2755 /* Enable backlight in the panel power control. */
2756 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2757 {
2758         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2759         intel_wakeref_t wakeref;
2760
2761         /*
2762          * If we enable the backlight right away following a panel power
2763          * on, we may see slight flicker as the panel syncs with the eDP
2764          * link.  So delay a bit to make sure the image is solid before
2765          * allowing it to appear.
2766          */
2767         wait_backlight_on(intel_dp);
2768
2769         with_pps_lock(intel_dp, wakeref) {
2770                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2771                 u32 pp;
2772
2773                 pp = ironlake_get_pp_control(intel_dp);
2774                 pp |= EDP_BLC_ENABLE;
2775
2776                 I915_WRITE(pp_ctrl_reg, pp);
2777                 POSTING_READ(pp_ctrl_reg);
2778         }
2779 }
2780
2781 /* Enable backlight PWM and backlight PP control. */
2782 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2783                             const struct drm_connector_state *conn_state)
2784 {
2785         struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2786
2787         if (!intel_dp_is_edp(intel_dp))
2788                 return;
2789
2790         DRM_DEBUG_KMS("\n");
2791
2792         intel_panel_enable_backlight(crtc_state, conn_state);
2793         _intel_edp_backlight_on(intel_dp);
2794 }
2795
2796 /* Disable backlight in the panel power control. */
2797 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2798 {
2799         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2800         intel_wakeref_t wakeref;
2801
2802         if (!intel_dp_is_edp(intel_dp))
2803                 return;
2804
2805         with_pps_lock(intel_dp, wakeref) {
2806                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2807                 u32 pp;
2808
2809                 pp = ironlake_get_pp_control(intel_dp);
2810                 pp &= ~EDP_BLC_ENABLE;
2811
2812                 I915_WRITE(pp_ctrl_reg, pp);
2813                 POSTING_READ(pp_ctrl_reg);
2814         }
2815
2816         intel_dp->last_backlight_off = jiffies;
2817         edp_wait_backlight_off(intel_dp);
2818 }
2819
2820 /* Disable backlight PP control and backlight PWM. */
2821 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2822 {
2823         struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2824
2825         if (!intel_dp_is_edp(intel_dp))
2826                 return;
2827
2828         DRM_DEBUG_KMS("\n");
2829
2830         _intel_edp_backlight_off(intel_dp);
2831         intel_panel_disable_backlight(old_conn_state);
2832 }
2833
2834 /*
2835  * Hook for controlling the panel power control backlight through the bl_power
2836  * sysfs attribute. Take care to handle multiple calls.
2837  */
2838 static void intel_edp_backlight_power(struct intel_connector *connector,
2839                                       bool enable)
2840 {
2841         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2842         intel_wakeref_t wakeref;
2843         bool is_enabled;
2844
2845         is_enabled = false;
2846         with_pps_lock(intel_dp, wakeref)
2847                 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2848         if (is_enabled == enable)
2849                 return;
2850
2851         DRM_DEBUG_KMS("panel power control backlight %s\n",
2852                       enable ? "enable" : "disable");
2853
2854         if (enable)
2855                 _intel_edp_backlight_on(intel_dp);
2856         else
2857                 _intel_edp_backlight_off(intel_dp);
2858 }
2859
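/*
 * State assertion helpers: compare the expected state against the
 * DP_PORT_EN/DP_PLL_ENABLE bits and complain via I915_STATE_WARN on
 * a mismatch. Used to sanity check the eDP PLL sequences below.
 */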
2860 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2861 {
2862         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2863         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2864         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2865
2866         I915_STATE_WARN(cur_state != state,
2867                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
2868                         dig_port->base.base.base.id, dig_port->base.base.name,
2869                         onoff(state), onoff(cur_state));
2870 }
2871 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2872
2873 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2874 {
2875         bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2876
2877         I915_STATE_WARN(cur_state != state,
2878                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2879                         onoff(state), onoff(cur_state));
2880 }
2881 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2882 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2883
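/*
 * Enable the eDP PLL for port A on ILK-family hardware: the link
 * frequency select (162 vs. 270 MHz) is programmed first, while the
 * PLL is still disabled, and only then is DP_PLL_ENABLE set, with
 * generous udelay()s to let things settle.
 */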
2884 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2885                                 const struct intel_crtc_state *pipe_config)
2886 {
2887         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2888         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2889
2890         assert_pipe_disabled(dev_priv, crtc->pipe);
2891         assert_dp_port_disabled(intel_dp);
2892         assert_edp_pll_disabled(dev_priv);
2893
2894         DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2895                       pipe_config->port_clock);
2896
2897         intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2898
2899         if (pipe_config->port_clock == 162000)
2900                 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2901         else
2902                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2903
2904         I915_WRITE(DP_A, intel_dp->DP);
2905         POSTING_READ(DP_A);
2906         udelay(500);
2907
2908         /*
2909          * [DevILK] Workaround required when enabling the DP PLL
2910          * while a pipe going to FDI is enabled:
2911          * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2912          * 2. Program DP PLL enable
2913          */
2914         if (IS_GEN(dev_priv, 5))
2915                 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2916
2917         intel_dp->DP |= DP_PLL_ENABLE;
2918
2919         I915_WRITE(DP_A, intel_dp->DP);
2920         POSTING_READ(DP_A);
2921         udelay(200);
2922 }
2923
2924 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2925                                  const struct intel_crtc_state *old_crtc_state)
2926 {
2927         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2928         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2929
2930         assert_pipe_disabled(dev_priv, crtc->pipe);
2931         assert_dp_port_disabled(intel_dp);
2932         assert_edp_pll_enabled(dev_priv);
2933
2934         DRM_DEBUG_KMS("disabling eDP PLL\n");
2935
2936         intel_dp->DP &= ~DP_PLL_ENABLE;
2937
2938         I915_WRITE(DP_A, intel_dp->DP);
2939         POSTING_READ(DP_A);
2940         udelay(200);
2941 }
2942
2943 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2944 {
2945         /*
2946          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2947          * be capable of signalling downstream hpd with a long pulse.
2948          * Whether or not that means D3 is safe to use is not clear,
2949          * but let's assume so until proven otherwise.
2950          *
2951          * FIXME should really check all downstream ports...
2952          */
2953         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2954                 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2955                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2956 }
2957
2958 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2959                                            const struct intel_crtc_state *crtc_state,
2960                                            bool enable)
2961 {
2962         int ret;
2963
2964         if (!crtc_state->dsc_params.compression_enable)
2965                 return;
2966
2967         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2968                                  enable ? DP_DECOMPRESSION_EN : 0);
2969         if (ret < 0)
2970                 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2971                               enable ? "enable" : "disable");
2972 }
2973
2974 /* If the sink supports it, try to set the power state appropriately */
2975 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2976 {
2977         int ret, i;
2978
2979         /* Should have a valid DPCD by this point */
2980         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2981                 return;
2982
2983         if (mode != DRM_MODE_DPMS_ON) {
2984                 if (downstream_hpd_needs_d0(intel_dp))
2985                         return;
2986
2987                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2988                                          DP_SET_POWER_D3);
2989         } else {
2990                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2991
2992                 /*
2993                  * When turning on, we need to retry a few times, sleeping
2994                  * 1 ms between attempts, to give the sink time to wake up.
2995                  */
2996                 for (i = 0; i < 3; i++) {
2997                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2998                                                  DP_SET_POWER_D0);
2999                         if (ret == 1)
3000                                 break;
3001                         msleep(1);
3002                 }
3003
3004                 if (ret == 1 && lspcon->active)
3005                         lspcon_wait_pcon_mode(lspcon);
3006         }
3007
3008         if (ret != 1)
3009                 DRM_DEBUG_KMS("failed to %s sink power state\n",
3010                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3011 }
3012
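/*
 * On CPT the pipe -> port routing lives in the per-transcoder
 * TRANS_DP_CTL registers rather than in the port register itself,
 * so finding the pipe for a port means scanning all pipes for a
 * matching port select field.
 */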
3013 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3014                                  enum port port, enum pipe *pipe)
3015 {
3016         enum pipe p;
3017
3018         for_each_pipe(dev_priv, p) {
3019                 u32 val = I915_READ(TRANS_DP_CTL(p));
3020
3021                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3022                         *pipe = p;
3023                         return true;
3024                 }
3025         }
3026
3027         DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3028
3029         /* must initialize pipe to something for the asserts */
3030         *pipe = PIPE_A;
3031
3032         return false;
3033 }
3034
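/*
 * Check whether a DP port is enabled, and report which pipe it is
 * attached to. The pipe select field moved around over the years:
 * IVB port A and CHV have their own mask/shift, CPT routes non-A
 * ports through the transcoder registers (see above), and everything
 * else uses the classic DP_PIPE_SEL field.
 */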
3035 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3036                            i915_reg_t dp_reg, enum port port,
3037                            enum pipe *pipe)
3038 {
3039         bool ret;
3040         u32 val;
3041
3042         val = I915_READ(dp_reg);
3043
3044         ret = val & DP_PORT_EN;
3045
3046         /* asserts want to know the pipe even if the port is disabled */
3047         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3048                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3049         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3050                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3051         else if (IS_CHERRYVIEW(dev_priv))
3052                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3053         else
3054                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3055
3056         return ret;
3057 }
3058
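/*
 * Hardware state readout for the ->get_hw_state() hook. The encoder's
 * power domain is grabbed only if it is already enabled, so a powered
 * down encoder reads back as disabled instead of waking the hardware.
 */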
3059 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3060                                   enum pipe *pipe)
3061 {
3062         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3063         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3064         intel_wakeref_t wakeref;
3065         bool ret;
3066
3067         wakeref = intel_display_power_get_if_enabled(dev_priv,
3068                                                      encoder->power_domain);
3069         if (!wakeref)
3070                 return false;
3071
3072         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3073                                     encoder->port, pipe);
3074
3075         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3076
3077         return ret;
3078 }
3079
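/*
 * Read the current hardware state back into pipe_config: output type,
 * sync polarities (from TRANS_DP_CTL on CPT, the port register
 * elsewhere), lane count, link M/N values and the port clock, plus
 * the eDP BIOS bpp fixup at the end.
 */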
3080 static void intel_dp_get_config(struct intel_encoder *encoder,
3081                                 struct intel_crtc_state *pipe_config)
3082 {
3083         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3084         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3085         u32 tmp, flags = 0;
3086         enum port port = encoder->port;
3087         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3088
3089         if (encoder->type == INTEL_OUTPUT_EDP)
3090                 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3091         else
3092                 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3093
3094         tmp = I915_READ(intel_dp->output_reg);
3095
3096         pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3097
3098         if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3099                 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3100
3101                 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3102                         flags |= DRM_MODE_FLAG_PHSYNC;
3103                 else
3104                         flags |= DRM_MODE_FLAG_NHSYNC;
3105
3106                 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3107                         flags |= DRM_MODE_FLAG_PVSYNC;
3108                 else
3109                         flags |= DRM_MODE_FLAG_NVSYNC;
3110         } else {
3111                 if (tmp & DP_SYNC_HS_HIGH)
3112                         flags |= DRM_MODE_FLAG_PHSYNC;
3113                 else
3114                         flags |= DRM_MODE_FLAG_NHSYNC;
3115
3116                 if (tmp & DP_SYNC_VS_HIGH)
3117                         flags |= DRM_MODE_FLAG_PVSYNC;
3118                 else
3119                         flags |= DRM_MODE_FLAG_NVSYNC;
3120         }
3121
3122         pipe_config->base.adjusted_mode.flags |= flags;
3123
3124         if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3125                 pipe_config->limited_color_range = true;
3126
3127         pipe_config->lane_count =
3128                 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3129
3130         intel_dp_get_m_n(crtc, pipe_config);
3131
3132         if (port == PORT_A) {
3133                 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3134                         pipe_config->port_clock = 162000;
3135                 else
3136                         pipe_config->port_clock = 270000;
3137         }
3138
3139         pipe_config->base.adjusted_mode.crtc_clock =
3140                 intel_dotclock_calculate(pipe_config->port_clock,
3141                                          &pipe_config->dp_m_n);
3142
3143         if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3144             pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3145                 /*
3146                  * This is a big fat ugly hack.
3147                  *
3148                  * Some machines in UEFI boot mode provide us a VBT that has 18
3149                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3150                  * unknown we fail to light up. Yet the same BIOS boots up with
3151                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3152                  * max, not what it tells us to use.
3153                  *
3154                  * Note: This will still be broken if the eDP panel is not lit
3155                  * up by the BIOS, and thus we can't get the mode at module
3156                  * load.
3157                  */
3158                 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3159                               pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3160                 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3161         }
3162 }
3163
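/*
 * Common DP disable path: shut down audio first, then turn the panel
 * off in the order the PPS requires - force VDD on so AUX keeps
 * working, disable the backlight, put the sink into D3 and finally
 * cut panel power.
 */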
3164 static void intel_disable_dp(struct intel_encoder *encoder,
3165                              const struct intel_crtc_state *old_crtc_state,
3166                              const struct drm_connector_state *old_conn_state)
3167 {
3168         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3169
3170         intel_dp->link_trained = false;
3171
3172         if (old_crtc_state->has_audio)
3173                 intel_audio_codec_disable(encoder,
3174                                           old_crtc_state, old_conn_state);
3175
3176         /* Make sure the panel is off before trying to change the mode. But also
3177          * ensure that we have vdd while we switch off the panel. */
3178         intel_edp_panel_vdd_on(intel_dp);
3179         intel_edp_backlight_off(old_conn_state);
3180         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3181         intel_edp_panel_off(intel_dp);
3182 }
3183
3184 static void g4x_disable_dp(struct intel_encoder *encoder,
3185                            const struct intel_crtc_state *old_crtc_state,
3186                            const struct drm_connector_state *old_conn_state)
3187 {
3188         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3189 }
3190
3191 static void vlv_disable_dp(struct intel_encoder *encoder,
3192                            const struct intel_crtc_state *old_crtc_state,
3193                            const struct drm_connector_state *old_conn_state)
3194 {
3195         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3196 }
3197
3198 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3199                                 const struct intel_crtc_state *old_crtc_state,
3200                                 const struct drm_connector_state *old_conn_state)
3201 {
3202         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3203         enum port port = encoder->port;
3204
3205         /*
3206          * Bspec does not list a specific disable sequence for g4x DP.
3207          * Follow the ilk+ sequence (disable pipe before the port) for
3208          * g4x DP as it does not suffer from underruns like the normal
3209          * g4x modeset sequence (disable pipe after the port).
3210          */
3211         intel_dp_link_down(encoder, old_crtc_state);
3212
3213         /* Only ilk+ has port A */
3214         if (port == PORT_A)
3215                 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3216 }
3217
3218 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3219                                 const struct intel_crtc_state *old_crtc_state,
3220                                 const struct drm_connector_state *old_conn_state)
3221 {
3222         intel_dp_link_down(encoder, old_crtc_state);
3223 }
3224
3225 static void chv_post_disable_dp(struct intel_encoder *encoder,
3226                                 const struct intel_crtc_state *old_crtc_state,
3227                                 const struct drm_connector_state *old_conn_state)
3228 {
3229         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3230
3231         intel_dp_link_down(encoder, old_crtc_state);
3232
3233         vlv_dpio_get(dev_priv);
3234
3235         /* Assert data lane reset */
3236         chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3237
3238         vlv_dpio_put(dev_priv);
3239 }
3240
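/*
 * Program the requested link training pattern into the source. Three
 * register layouts exist: DDI platforms use DP_TP_CTL, IVB port A and
 * CPT PCH ports use the *_CPT link train bits in the port register,
 * and everything else uses the original g4x bits. The two pre-DDI
 * layouts only know TPS1/TPS2, hence the TPS3 -> TPS2 fallback.
 */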
3241 static void
3242 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3243                          u32 *DP,
3244                          u8 dp_train_pat)
3245 {
3246         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3247         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3248         enum port port = intel_dig_port->base.port;
3249         u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3250
3251         if (dp_train_pat & train_pat_mask)
3252                 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3253                               dp_train_pat & train_pat_mask);
3254
3255         if (HAS_DDI(dev_priv)) {
3256                 u32 temp = I915_READ(DP_TP_CTL(port));
3257
3258                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3259                         temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3260                 else
3261                         temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3262
3263                 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3264                 switch (dp_train_pat & train_pat_mask) {
3265                 case DP_TRAINING_PATTERN_DISABLE:
3266                         temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3267
3268                         break;
3269                 case DP_TRAINING_PATTERN_1:
3270                         temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3271                         break;
3272                 case DP_TRAINING_PATTERN_2:
3273                         temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3274                         break;
3275                 case DP_TRAINING_PATTERN_3:
3276                         temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3277                         break;
3278                 case DP_TRAINING_PATTERN_4:
3279                         temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3280                         break;
3281                 }
3282                 I915_WRITE(DP_TP_CTL(port), temp);
3283
3284         } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3285                    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3286                 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3287
3288                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3289                 case DP_TRAINING_PATTERN_DISABLE:
3290                         *DP |= DP_LINK_TRAIN_OFF_CPT;
3291                         break;
3292                 case DP_TRAINING_PATTERN_1:
3293                         *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3294                         break;
3295                 case DP_TRAINING_PATTERN_2:
3296                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3297                         break;
3298                 case DP_TRAINING_PATTERN_3:
3299                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3300                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3301                         break;
3302                 }
3303
3304         } else {
3305                 *DP &= ~DP_LINK_TRAIN_MASK;
3306
3307                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3308                 case DP_TRAINING_PATTERN_DISABLE:
3309                         *DP |= DP_LINK_TRAIN_OFF;
3310                         break;
3311                 case DP_TRAINING_PATTERN_1:
3312                         *DP |= DP_LINK_TRAIN_PAT_1;
3313                         break;
3314                 case DP_TRAINING_PATTERN_2:
3315                         *DP |= DP_LINK_TRAIN_PAT_2;
3316                         break;
3317                 case DP_TRAINING_PATTERN_3:
3318                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3319                         *DP |= DP_LINK_TRAIN_PAT_2;
3320                         break;
3321                 }
3322         }
3323 }
3324
3325 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3326                                  const struct intel_crtc_state *old_crtc_state)
3327 {
3328         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3329
3330         /* enable with pattern 1 (as per spec) */
3331
3332         intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3333
3334         /*
3335          * Magic for VLV/CHV. We _must_ first set up the register
3336          * without actually enabling the port, and then do another
3337          * write to enable the port. Otherwise link training will
3338          * fail when the power sequencer is freshly used for this port.
3339          */
3340         intel_dp->DP |= DP_PORT_EN;
3341         if (old_crtc_state->has_audio)
3342                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3343
3344         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3345         POSTING_READ(intel_dp->output_reg);
3346 }
3347
3348 static void intel_enable_dp(struct intel_encoder *encoder,
3349                             const struct intel_crtc_state *pipe_config,
3350                             const struct drm_connector_state *conn_state)
3351 {
3352         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3353         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3354         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3355         u32 dp_reg = I915_READ(intel_dp->output_reg);
3356         enum pipe pipe = crtc->pipe;
3357         intel_wakeref_t wakeref;
3358
3359         if (WARN_ON(dp_reg & DP_PORT_EN))
3360                 return;
3361
3362         with_pps_lock(intel_dp, wakeref) {
3363                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3364                         vlv_init_panel_power_sequencer(encoder, pipe_config);
3365
3366                 intel_dp_enable_port(intel_dp, pipe_config);
3367
3368                 edp_panel_vdd_on(intel_dp);
3369                 edp_panel_on(intel_dp);
3370                 edp_panel_vdd_off(intel_dp, true);
3371         }
3372
3373         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3374                 unsigned int lane_mask = 0x0;
3375
3376                 if (IS_CHERRYVIEW(dev_priv))
3377                         lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3378
3379                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3380                                     lane_mask);
3381         }
3382
3383         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3384         intel_dp_start_link_train(intel_dp);
3385         intel_dp_stop_link_train(intel_dp);
3386
3387         if (pipe_config->has_audio) {
3388                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3389                                  pipe_name(pipe));
3390                 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3391         }
3392 }
3393
3394 static void g4x_enable_dp(struct intel_encoder *encoder,
3395                           const struct intel_crtc_state *pipe_config,
3396                           const struct drm_connector_state *conn_state)
3397 {
3398         intel_enable_dp(encoder, pipe_config, conn_state);
3399         intel_edp_backlight_on(pipe_config, conn_state);
3400 }
3401
3402 static void vlv_enable_dp(struct intel_encoder *encoder,
3403                           const struct intel_crtc_state *pipe_config,
3404                           const struct drm_connector_state *conn_state)
3405 {
3406         intel_edp_backlight_on(pipe_config, conn_state);
3407 }
3408
3409 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3410                               const struct intel_crtc_state *pipe_config,
3411                               const struct drm_connector_state *conn_state)
3412 {
3413         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3414         enum port port = encoder->port;
3415
3416         intel_dp_prepare(encoder, pipe_config);
3417
3418         /* Only ilk+ has port A */
3419         if (port == PORT_A)
3420                 ironlake_edp_pll_on(intel_dp, pipe_config);
3421 }
3422
3423 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3424 {
3425         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3426         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3427         enum pipe pipe = intel_dp->pps_pipe;
3428         i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3429
3430         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3431
3432         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3433                 return;
3434
3435         edp_panel_vdd_off_sync(intel_dp);
3436
3437         /*
3438          * VLV seems to get confused when multiple power sequencers
3439          * have the same port selected (even if only one has power/vdd
3440          * enabled). The failure manifests as vlv_wait_port_ready() failing.
3441          * CHV, on the other hand, doesn't seem to mind having the same port
3442          * selected in multiple power sequencers, but let's always clear the
3443          * port select when logically disconnecting a power sequencer
3444          * from a port.
3445          */
3446         DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
3447                       pipe_name(pipe), intel_dig_port->base.base.base.id,
3448                       intel_dig_port->base.base.name);
3449         I915_WRITE(pp_on_reg, 0);
3450         POSTING_READ(pp_on_reg);
3451
3452         intel_dp->pps_pipe = INVALID_PIPE;
3453 }
3454
3455 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3456                                       enum pipe pipe)
3457 {
3458         struct intel_encoder *encoder;
3459
3460         lockdep_assert_held(&dev_priv->pps_mutex);
3461
3462         for_each_intel_dp(&dev_priv->drm, encoder) {
3463                 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3464
3465                 WARN(intel_dp->active_pipe == pipe,
3466                      "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
3467                      pipe_name(pipe), encoder->base.base.id,
3468                      encoder->base.name);
3469
3470                 if (intel_dp->pps_pipe != pipe)
3471                         continue;
3472
3473                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
3474                               pipe_name(pipe), encoder->base.base.id,
3475                               encoder->base.name);
3476
3477                 /* make sure vdd is off before we steal it */
3478                 vlv_detach_power_sequencer(intel_dp);
3479         }
3480 }
3481
3482 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3483                                            const struct intel_crtc_state *crtc_state)
3484 {
3485         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3486         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3487         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3488
3489         lockdep_assert_held(&dev_priv->pps_mutex);
3490
3491         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3492
3493         if (intel_dp->pps_pipe != INVALID_PIPE &&
3494             intel_dp->pps_pipe != crtc->pipe) {
3495                 /*
3496                  * If another power sequencer was being used on this
3497                  * port previously make sure to turn off vdd there while
3498                  * we still have control of it.
3499                  */
3500                 vlv_detach_power_sequencer(intel_dp);
3501         }
3502
3503         /*
3504          * We may be stealing the power
3505          * sequencer from another port.
3506          */
3507         vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3508
3509         intel_dp->active_pipe = crtc->pipe;
3510
3511         if (!intel_dp_is_edp(intel_dp))
3512                 return;
3513
3514         /* now it's all ours */
3515         intel_dp->pps_pipe = crtc->pipe;
3516
3517         DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
3518                       pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
3519                       encoder->base.name);
3520
3521         /* init power sequencer on this pipe and port */
3522         intel_dp_init_panel_power_sequencer(intel_dp);
3523         intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3524 }
3525
3526 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3527                               const struct intel_crtc_state *pipe_config,
3528                               const struct drm_connector_state *conn_state)
3529 {
3530         vlv_phy_pre_encoder_enable(encoder, pipe_config);
3531
3532         intel_enable_dp(encoder, pipe_config, conn_state);
3533 }
3534
3535 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3536                                   const struct intel_crtc_state *pipe_config,
3537                                   const struct drm_connector_state *conn_state)
3538 {
3539         intel_dp_prepare(encoder, pipe_config);
3540
3541         vlv_phy_pre_pll_enable(encoder, pipe_config);
3542 }
3543
3544 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3545                               const struct intel_crtc_state *pipe_config,
3546                               const struct drm_connector_state *conn_state)
3547 {
3548         chv_phy_pre_encoder_enable(encoder, pipe_config);
3549
3550         intel_enable_dp(encoder, pipe_config, conn_state);
3551
3552         /* Second common lane will stay alive on its own now */
3553         chv_phy_release_cl2_override(encoder);
3554 }
3555
3556 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3557                                   const struct intel_crtc_state *pipe_config,
3558                                   const struct drm_connector_state *conn_state)
3559 {
3560         intel_dp_prepare(encoder, pipe_config);
3561
3562         chv_phy_pre_pll_enable(encoder, pipe_config);
3563 }
3564
3565 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3566                                     const struct intel_crtc_state *old_crtc_state,
3567                                     const struct drm_connector_state *old_conn_state)
3568 {
3569         chv_phy_post_pll_disable(encoder, old_crtc_state);
3570 }
3571
3572 /*
3573  * Fetch AUX CH registers 0x202 - 0x207 which contain
3574  * link status information
3575  */
3576 bool
3577 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3578 {
3579         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3580                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3581 }
3582
3583 /* These are source-specific values. */
3584 u8
3585 intel_dp_voltage_max(struct intel_dp *intel_dp)
3586 {
3587         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3588         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3589         enum port port = encoder->port;
3590
3591         if (HAS_DDI(dev_priv))
3592                 return intel_ddi_dp_voltage_max(encoder);
3593         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3594                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3595         else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3596                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3597         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3598                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3599         else
3600                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3601 }
3602
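/*
 * Maximum pre-emphasis the source supports for a given voltage swing.
 * The tables below share a budget: the higher the requested swing,
 * the less headroom remains for pre-emphasis, down to level 0 at the
 * maximum swing.
 */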
3603 u8
3604 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3605 {
3606         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3607         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3608         enum port port = encoder->port;
3609
3610         if (HAS_DDI(dev_priv)) {
3611                 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3612         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3613                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3614                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3615                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3616                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3617                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3618                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3619                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3620                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3621                 default:
3622                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3623                 }
3624         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3625                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3626                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3627                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3628                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3629                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3630                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3631                 default:
3632                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3633                 }
3634         } else {
3635                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3636                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3637                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3638                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3639                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3640                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3641                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3642                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3643                 default:
3644                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3645                 }
3646         }
3647 }
3648
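/*
 * On VLV the drive levels are programmed through the DPIO PHY rather
 * than the port register, as a (demph, preemph, uniqtranscale) magic
 * value tuple per swing/pre-emphasis combination. Returns 0 since no
 * bits in the DP port register itself need updating.
 */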
3649 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3650 {
3651         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3652         unsigned long demph_reg_value, preemph_reg_value,
3653                 uniqtranscale_reg_value;
3654         u8 train_set = intel_dp->train_set[0];
3655
3656         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3657         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3658                 preemph_reg_value = 0x0004000;
3659                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3660                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3661                         demph_reg_value = 0x2B405555;
3662                         uniqtranscale_reg_value = 0x552AB83A;
3663                         break;
3664                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3665                         demph_reg_value = 0x2B404040;
3666                         uniqtranscale_reg_value = 0x5548B83A;
3667                         break;
3668                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3669                         demph_reg_value = 0x2B245555;
3670                         uniqtranscale_reg_value = 0x5560B83A;
3671                         break;
3672                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3673                         demph_reg_value = 0x2B405555;
3674                         uniqtranscale_reg_value = 0x5598DA3A;
3675                         break;
3676                 default:
3677                         return 0;
3678                 }
3679                 break;
3680         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3681                 preemph_reg_value = 0x0002000;
3682                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3683                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3684                         demph_reg_value = 0x2B404040;
3685                         uniqtranscale_reg_value = 0x5552B83A;
3686                         break;
3687                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3688                         demph_reg_value = 0x2B404848;
3689                         uniqtranscale_reg_value = 0x5580B83A;
3690                         break;
3691                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3692                         demph_reg_value = 0x2B404040;
3693                         uniqtranscale_reg_value = 0x55ADDA3A;
3694                         break;
3695                 default:
3696                         return 0;
3697                 }
3698                 break;
3699         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3700                 preemph_reg_value = 0x0000000;
3701                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3702                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3703                         demph_reg_value = 0x2B305555;
3704                         uniqtranscale_reg_value = 0x5570B83A;
3705                         break;
3706                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3707                         demph_reg_value = 0x2B2B4040;
3708                         uniqtranscale_reg_value = 0x55ADDA3A;
3709                         break;
3710                 default:
3711                         return 0;
3712                 }
3713                 break;
3714         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3715                 preemph_reg_value = 0x0006000;
3716                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3717                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3718                         demph_reg_value = 0x1B405555;
3719                         uniqtranscale_reg_value = 0x55ADDA3A;
3720                         break;
3721                 default:
3722                         return 0;
3723                 }
3724                 break;
3725         default:
3726                 return 0;
3727         }
3728
3729         vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3730                                  uniqtranscale_reg_value, 0);
3731
3732         return 0;
3733 }
3734
3735 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3736 {
3737         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3738         u32 deemph_reg_value, margin_reg_value;
3739         bool uniq_trans_scale = false;
3740         u8 train_set = intel_dp->train_set[0];
3741
3742         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3743         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3744                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3745                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3746                         deemph_reg_value = 128;
3747                         margin_reg_value = 52;
3748                         break;
3749                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3750                         deemph_reg_value = 128;
3751                         margin_reg_value = 77;
3752                         break;
3753                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3754                         deemph_reg_value = 128;
3755                         margin_reg_value = 102;
3756                         break;
3757                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3758                         deemph_reg_value = 128;
3759                         margin_reg_value = 154;
3760                         uniq_trans_scale = true;
3761                         break;
3762                 default:
3763                         return 0;
3764                 }
3765                 break;
3766         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3767                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3768                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3769                         deemph_reg_value = 85;
3770                         margin_reg_value = 78;
3771                         break;
3772                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3773                         deemph_reg_value = 85;
3774                         margin_reg_value = 116;
3775                         break;
3776                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3777                         deemph_reg_value = 85;
3778                         margin_reg_value = 154;
3779                         break;
3780                 default:
3781                         return 0;
3782                 }
3783                 break;
3784         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3785                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3786                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3787                         deemph_reg_value = 64;
3788                         margin_reg_value = 104;
3789                         break;
3790                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3791                         deemph_reg_value = 64;
3792                         margin_reg_value = 154;
3793                         break;
3794                 default:
3795                         return 0;
3796                 }
3797                 break;
3798         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3799                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3800                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3801                         deemph_reg_value = 43;
3802                         margin_reg_value = 154;
3803                         break;
3804                 default:
3805                         return 0;
3806                 }
3807                 break;
3808         default:
3809                 return 0;
3810         }
3811
3812         chv_set_phy_signal_level(encoder, deemph_reg_value,
3813                                  margin_reg_value, uniq_trans_scale);
3814
3815         return 0;
3816 }
3817
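/*
 * On g4x the swing and pre-emphasis are plain bitfields in the port
 * register. For illustration: a train_set requesting voltage swing
 * level 2 with pre-emphasis level 1 maps to
 * DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5 (0.8 V swing, 3.5 dB).
 */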
3818 static u32
3819 g4x_signal_levels(u8 train_set)
3820 {
3821         u32 signal_levels = 0;
3822
3823         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3824         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3825         default:
3826                 signal_levels |= DP_VOLTAGE_0_4;
3827                 break;
3828         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3829                 signal_levels |= DP_VOLTAGE_0_6;
3830                 break;
3831         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3832                 signal_levels |= DP_VOLTAGE_0_8;
3833                 break;
3834         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3835                 signal_levels |= DP_VOLTAGE_1_2;
3836                 break;
3837         }
3838         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3839         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3840         default:
3841                 signal_levels |= DP_PRE_EMPHASIS_0;
3842                 break;
3843         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3844                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3845                 break;
3846         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3847                 signal_levels |= DP_PRE_EMPHASIS_6;
3848                 break;
3849         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3850                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3851                 break;
3852         }
3853         return signal_levels;
3854 }
3855
3856 /* SNB CPU eDP voltage swing and pre-emphasis control */
3857 static u32
3858 snb_cpu_edp_signal_levels(u8 train_set)
3859 {
3860         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3861                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3862         switch (signal_levels) {
3863         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3864         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3865                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3866         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3867                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3868         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3869         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3870                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3871         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3872         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3873                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3874         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3875         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3876                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3877         default:
3878                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3879                               signal_levels);
3880                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3881         }
3882 }
3883
3884 /* IVB CPU eDP voltage swing and pre-emphasis control */
3885 static u32
3886 ivb_cpu_edp_signal_levels(u8 train_set)
3887 {
3888         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3889                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3890         switch (signal_levels) {
3891         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3892                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3893         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3894                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3895         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3896                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3897
3898         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3899                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3900         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3901                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3902
3903         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3904                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3905         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3906                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3907
3908         default:
3909                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3910                               signal_levels);
3911                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3912         }
3913 }
3914
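/*
 * Dispatch to the platform specific drive level computation and write
 * the result into the port register. The mask selects which bits of
 * intel_dp->DP get replaced; paths that program the levels elsewhere
 * (bxt+, VLV and CHV) leave it at 0, so intel_dp->DP is written back
 * unchanged.
 */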
3915 void
3916 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3917 {
3918         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3919         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3920         enum port port = intel_dig_port->base.port;
3921         u32 signal_levels, mask = 0;
3922         u8 train_set = intel_dp->train_set[0];
3923
3924         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3925                 signal_levels = bxt_signal_levels(intel_dp);
3926         } else if (HAS_DDI(dev_priv)) {
3927                 signal_levels = ddi_signal_levels(intel_dp);
3928                 mask = DDI_BUF_EMP_MASK;
3929         } else if (IS_CHERRYVIEW(dev_priv)) {
3930                 signal_levels = chv_signal_levels(intel_dp);
3931         } else if (IS_VALLEYVIEW(dev_priv)) {
3932                 signal_levels = vlv_signal_levels(intel_dp);
3933         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3934                 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3935                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3936         } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3937                 signal_levels = snb_cpu_edp_signal_levels(train_set);
3938                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3939         } else {
3940                 signal_levels = g4x_signal_levels(train_set);
3941                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3942         }
3943
3944         if (mask)
3945                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3946
3947         DRM_DEBUG_KMS("Using vswing level %d\n",
3948                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3949         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3950                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3951                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3952
3953         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3954
3955         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3956         POSTING_READ(intel_dp->output_reg);
3957 }
3958
3959 void
3960 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3961                                        u8 dp_train_pat)
3962 {
3963         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3964         struct drm_i915_private *dev_priv =
3965                 to_i915(intel_dig_port->base.base.dev);
3966
3967         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3968
3969         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3970         POSTING_READ(intel_dp->output_reg);
3971 }
3972
3973 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3974 {
3975         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3976         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3977         enum port port = intel_dig_port->base.port;
3978         u32 val;
3979
3980         if (!HAS_DDI(dev_priv))
3981                 return;
3982
3983         val = I915_READ(DP_TP_CTL(port));
3984         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3985         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3986         I915_WRITE(DP_TP_CTL(port), val);
3987
3988         /*
3989          * Until TGL, PORT_A can only drive eDP in SST mode. There the only
3990          * reason we need to set idle transmission mode is to work around a HW
3991          * issue where we enable the pipe while not in idle link-training mode.
3992          * In this case there is a requirement to wait for a minimum number of
3993          * idle patterns to be sent.
3994          */
3995         if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
3996                 return;
3997
3998         if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
3999                                   DP_TP_STATUS_IDLE_DONE, 1))
4000                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
4001 }
4002
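/*
 * Tear the link down on the pre-DDI paths: switch to the idle training
 * pattern first, then clear DP_PORT_EN and the audio enable. Also
 * contains the IBX workaround of briefly parking the disabled port on
 * transcoder A so the matching HDMI port can be enabled there later.
 */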
4003 static void
4004 intel_dp_link_down(struct intel_encoder *encoder,
4005                    const struct intel_crtc_state *old_crtc_state)
4006 {
4007         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4008         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4009         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4010         enum port port = encoder->port;
4011         u32 DP = intel_dp->DP;
4012
4013         if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4014                 return;
4015
4016         DRM_DEBUG_KMS("\n");
4017
4018         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4019             (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4020                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4021                 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4022         } else {
4023                 DP &= ~DP_LINK_TRAIN_MASK;
4024                 DP |= DP_LINK_TRAIN_PAT_IDLE;
4025         }
4026         I915_WRITE(intel_dp->output_reg, DP);
4027         POSTING_READ(intel_dp->output_reg);
4028
4029         DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4030         I915_WRITE(intel_dp->output_reg, DP);
4031         POSTING_READ(intel_dp->output_reg);
4032
4033         /*
4034          * HW workaround for IBX, we need to move the port
4035          * to transcoder A after disabling it to allow the
4036          * matching HDMI port to be enabled on transcoder A.
4037          */
4038         if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4039                 /*
4040                  * We get CPU/PCH FIFO underruns on the other pipe when
4041                  * doing the workaround. Sweep them under the rug.
4042                  */
4043                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4044                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4045
4046                 /* always enable with pattern 1 (as per spec) */
4047                 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4048                 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4049                         DP_LINK_TRAIN_PAT_1;
4050                 I915_WRITE(intel_dp->output_reg, DP);
4051                 POSTING_READ(intel_dp->output_reg);
4052
4053                 DP &= ~DP_PORT_EN;
4054                 I915_WRITE(intel_dp->output_reg, DP);
4055                 POSTING_READ(intel_dp->output_reg);
4056
4057                 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4058                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4059                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4060         }
4061
4062         msleep(intel_dp->panel_power_down_delay);
4063
4064         intel_dp->DP = DP;
4065
4066         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4067                 intel_wakeref_t wakeref;
4068
4069                 with_pps_lock(intel_dp, wakeref)
4070                         intel_dp->active_pipe = INVALID_PIPE;
4071         }
4072 }
4073
4074 static void
4075 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4076 {
4077         u8 dpcd_ext[6];
4078
4079         /*
4080          * Prior to DP 1.3 the bit represented by
4081          * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4082          * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4083          * the true capability of the panel. The only way to check is to
4084          * compare the caps at 0000h and 2200h.
4085          */
4086         if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4087               DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4088                 return;
4089
4090         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4091                              dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4092                 DRM_ERROR("Failed to read DPCD extended receiver capabilities\n");
4093                 return;
4094         }
4095
4096         if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4097                 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4098                 return;
4099         }
4100
4101         if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4102                 return;
4103
4104         DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4105                       (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4106
4107         memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4108 }
4109
4110 bool
4111 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4112 {
4113         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4114                              sizeof(intel_dp->dpcd)) < 0)
4115                 return false; /* aux transfer failed */
4116
4117         intel_dp_extended_receiver_capabilities(intel_dp);
4118
4119         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4120
4121         return intel_dp->dpcd[DP_DPCD_REV] != 0;
4122 }
4123
4124 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4125 {
4126         u8 dprx = 0;
4127
4128         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4129                               &dprx) != 1)
4130                 return false;
4131         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4132 }
4133
4134 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4135 {
4136         /*
4137          * Clear the cached register set to avoid using stale values
4138          * for the sinks that do not support DSC.
4139          */
4140         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4141
4142         /* Clear fec_capable to avoid using stale values */
4143         intel_dp->fec_capable = 0;
4144
4145         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
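        /* DPCD revisions are major.minor in hex, e.g. 0x14 is DPCD 1.4 */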
4146         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4147             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4148                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4149                                      intel_dp->dsc_dpcd,
4150                                      sizeof(intel_dp->dsc_dpcd)) < 0)
4151                         DRM_ERROR("Failed to read DPCD register 0x%x\n",
4152                                   DP_DSC_SUPPORT);
4153
4154                 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4155                               (int)sizeof(intel_dp->dsc_dpcd),
4156                               intel_dp->dsc_dpcd);
4157
4158                 /* FEC is supported only on DP 1.4 */
4159                 if (!intel_dp_is_edp(intel_dp) &&
4160                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4161                                       &intel_dp->fec_capable) < 0)
4162                         DRM_ERROR("Failed to read FEC DPCD register\n");
4163
4164                 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4165         }
4166 }
4167
4168 static bool
4169 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4170 {
4171         struct drm_i915_private *dev_priv =
4172                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4173
4174         /* this function is meant to be called only once */
4175         WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4176
4177         if (!intel_dp_read_dpcd(intel_dp))
4178                 return false;
4179
4180         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4181                          drm_dp_is_branch(intel_dp->dpcd));
4182
4183         /*
4184          * Read the eDP display control registers.
4185          *
4186          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4187          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4188          * set, but require eDP 1.4+ detection (e.g. for supported link rates
4189          * method). The display control registers should read zero if they're
4190          * not supported anyway.
4191          */
4192         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4193                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4194                              sizeof(intel_dp->edp_dpcd))
4195                 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4196                               intel_dp->edp_dpcd);
4197
4198         /*
4199          * This has to be called after intel_dp->edp_dpcd is filled; PSR
4200          * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4201          */
4202         intel_psr_init_dpcd(intel_dp);
4203
4204         /* Read the eDP 1.4+ supported link rates. */
4205         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4206                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4207                 int i;
4208
4209                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4210                                 sink_rates, sizeof(sink_rates));
4211
4212                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4213                         int val = le16_to_cpu(sink_rates[i]);
4214
4215                         if (val == 0)
4216                                 break;
4217
4218                         /* Value read multiplied by 200kHz gives the per-lane
4219                          * link rate in kHz. The source rates are, however,
4220                          * stored in terms of LS_Clk kHz. The full conversion
4221                          * back to symbols is
4222                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4223                          */
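                        /*
                         * For illustration: HBR's 2.7 Gbps per lane is stored
                         * in the table as 13500 (13500 * 200 kHz), which
                         * becomes 13500 * 200 / 10 = 270000 in driver units.
                         */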
4224                         intel_dp->sink_rates[i] = (val * 200) / 10;
4225                 }
4226                 intel_dp->num_sink_rates = i;
4227         }
4228
4229         /*
4230          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4231          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4232          */
4233         if (intel_dp->num_sink_rates)
4234                 intel_dp->use_rate_select = true;
4235         else
4236                 intel_dp_set_sink_rates(intel_dp);
4237
4238         intel_dp_set_common_rates(intel_dp);
4239
4240         /* Read the eDP DSC DPCD registers */
4241         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4242                 intel_dp_get_dsc_sink_cap(intel_dp);
4243
4244         return true;
4245 }
4246
4247
4248 static bool
4249 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4250 {
4251         if (!intel_dp_read_dpcd(intel_dp))
4252                 return false;
4253
4254         /*
4255          * Don't clobber cached eDP rates. Also skip re-reading
4256          * the OUI/ID since we know it won't change.
4257          */
4258         if (!intel_dp_is_edp(intel_dp)) {
4259                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4260                                  drm_dp_is_branch(intel_dp->dpcd));
4261
4262                 intel_dp_set_sink_rates(intel_dp);
4263                 intel_dp_set_common_rates(intel_dp);
4264         }
4265
4266         /*
4267          * Some eDP panels do not set a valid value for sink count, which
4268          * is why we don't bother reading it here or in intel_edp_init_dpcd().
4269          */
4270         if (!intel_dp_is_edp(intel_dp) &&
4271             !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
4272                 u8 count;
4273                 ssize_t r;
4274
4275                 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4276                 if (r < 1)
4277                         return false;
4278
4279                 /*
4280                  * Sink count can change between short pulse HPD interrupts,
4281                  * hence a member variable in intel_dp tracks any changes
4282                  * between them.
4283                  */
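                /* SINK_COUNT is bits 5:0 plus bit 7 as count bit 6: e.g. a raw 0x81 decodes to 65 */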
4284                 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4285
4286                 /*
4287                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4288                  * a dongle is present but no display. Unless we need to know
4289                  * whether a dongle is present or not, we don't need to update
4290                  * downstream port information. So, an early return here saves
4291                  * time from performing other operations which are not required.
4292                  */
4293                 if (!intel_dp->sink_count)
4294                         return false;
4295         }
4296
4297         if (!drm_dp_is_branch(intel_dp->dpcd))
4298                 return true; /* native DP sink */
4299
4300         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4301                 return true; /* no per-port downstream info */
4302
4303         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4304                              intel_dp->downstream_ports,
4305                              DP_MAX_DOWNSTREAM_PORTS) < 0)
4306                 return false; /* downstream port status fetch failed */
4307
4308         return true;
4309 }
4310
4311 static bool
4312 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4313 {
4314         u8 mstm_cap;
4315
4316         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4317                 return false;
4318
4319         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4320                 return false;
4321
4322         return mstm_cap & DP_MST_CAP;
4323 }
4324
4325 static bool
4326 intel_dp_can_mst(struct intel_dp *intel_dp)
4327 {
4328         return i915_modparams.enable_dp_mst &&
4329                 intel_dp->can_mst &&
4330                 intel_dp_sink_can_mst(intel_dp);
4331 }
4332
4333 static void
4334 intel_dp_configure_mst(struct intel_dp *intel_dp)
4335 {
4336         struct intel_encoder *encoder =
4337                 &dp_to_dig_port(intel_dp)->base;
4338         bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4339
4340         DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support? port: %s, sink: %s, modparam: %s\n",
4341                       encoder->base.base.id, encoder->base.name,
4342                       yesno(intel_dp->can_mst), yesno(sink_can_mst),
4343                       yesno(i915_modparams.enable_dp_mst));
4344
4345         if (!intel_dp->can_mst)
4346                 return;
4347
4348         intel_dp->is_mst = sink_can_mst &&
4349                 i915_modparams.enable_dp_mst;
4350
4351         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4352                                         intel_dp->is_mst);
4353 }
4354
4355 static bool
4356 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4357 {
4358         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4359                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4360                 DP_DPRX_ESI_LEN;
4361 }
4362
4363 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4364                                 int mode_clock, int mode_hdisplay)
4365 {
4366         u16 bits_per_pixel, max_bpp_small_joiner_ram;
4367         int i;
4368
4369         /*
4370          * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4371          * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4372          * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4373          * for MST -> TimeSlotsPerMTP has to be calculated
4374          */
4375         bits_per_pixel = (link_clock * lane_count * 8 *
4376                           DP_DSC_FEC_OVERHEAD_FACTOR) /
4377                 mode_clock;
4378
4379         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4380         max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4381                 mode_hdisplay;
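        /* e.g. a 3840 pixel wide mode is capped at 61440 / 3840 = 16 bpp */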
4382
4383         /*
4384          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
4385          * check, output bpp from small joiner RAM check)
4386          */
4387         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4388
4389         /* Error out if the max bpp is less than smallest allowed valid bpp */
4390         if (bits_per_pixel < valid_dsc_bpp[0]) {
4391                 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4392                 return 0;
4393         }
4394
4395         /* Round down to the largest valid VESA bpp not exceeding the value */
4396         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4397                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4398                         break;
4399         }
4400         bits_per_pixel = valid_dsc_bpp[i];
4401
4402         /*
4403          * Compressed BPP is in U6.4 format, so multiply by 16. For Gen 11,
4404          * the fractional part is 0.
4405          */
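        /* e.g. 12 bpp is returned as 12 << 4 = 192 (0xC0) */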
4406         return bits_per_pixel << 4;
4407 }
4408
4409 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4410                                 int mode_clock,
4411                                 int mode_hdisplay)
4412 {
4413         u8 min_slice_count, i;
4414         int max_slice_width;
4415
4416         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4417                 min_slice_count = DIV_ROUND_UP(mode_clock,
4418                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
4419         else
4420                 min_slice_count = DIV_ROUND_UP(mode_clock,
4421                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
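        /*
         * For illustration: a 533250 kHz 4k mode is below the 2720000
         * KPixels/s peak rate, so it needs at least
         * DIV_ROUND_UP(533250, 340000) = 2 slices.
         */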
4422
4423         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4424         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4425                 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4426                               max_slice_width);
4427                 return 0;
4428         }
4429         /* Also take into account max slice width */
4430         min_slice_count = min_t(u8, min_slice_count,
4431                                 DIV_ROUND_UP(mode_hdisplay,
4432                                              max_slice_width));
4433
4434         /* Pick the smallest valid slice count that satisfies the minimum */
4435         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4436                 if (valid_dsc_slicecount[i] >
4437                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4438                                                     false))
4439                         break;
4440                 if (min_slice_count <= valid_dsc_slicecount[i])
4441                         return valid_dsc_slicecount[i];
4442         }
4443
4444         DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4445         return 0;
4446 }
4447
4448 static void
4449 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4450                                const struct intel_crtc_state *crtc_state)
4451 {
4452         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4453         struct dp_sdp vsc_sdp = {};
4454
4455         /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4456         vsc_sdp.sdp_header.HB0 = 0;
4457         vsc_sdp.sdp_header.HB1 = 0x7;
4458
4459         /*
4460          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4461          * Colorimetry Format indication.
4462          */
4463         vsc_sdp.sdp_header.HB2 = 0x5;
4464
4465         /*
4466          * HB3: number of valid data bytes, 13h for the revision 05h
4467          * (Pixel Encoding/Colorimetry Format indication) VSC SDP.
4468          */
4469         vsc_sdp.sdp_header.HB3 = 0x13;
4470
4471         /*
4472          * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
4473          * DB16[3:0] DP 1.4a spec, Table 2-120
4474          */
4475         vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 */
4476         /* RGB->YCBCR color conversion uses the BT.709 color space. */
4477         vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4478
4479         /*
4480          * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4481          * the following Component Bit Depth values are defined:
4482          * 001b = 8bpc.
4483          * 010b = 10bpc.
4484          * 011b = 12bpc.
4485          * 100b = 16bpc.
4486          */
4487         switch (crtc_state->pipe_bpp) {
4488         case 24: /* 8bpc */
4489                 vsc_sdp.db[17] = 0x1;
4490                 break;
4491         case 30: /* 10bpc */
4492                 vsc_sdp.db[17] = 0x2;
4493                 break;
4494         case 36: /* 12bpc */
4495                 vsc_sdp.db[17] = 0x3;
4496                 break;
4497         case 48: /* 16bpc */
4498                 vsc_sdp.db[17] = 0x4;
4499                 break;
4500         default:
4501                 MISSING_CASE(crtc_state->pipe_bpp);
4502                 break;
4503         }
4504
4505         /*
4506          * Dynamic Range (Bit 7)
4507          * 0 = VESA range, 1 = CTA range.
4508          * All YCbCr formats are always limited range.
4509          */
4510         vsc_sdp.db[17] |= 0x80;
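        /*
         * e.g. a 10bpc (pipe_bpp == 30) YCbCr 4:2:0 BT.709 stream has
         * DB16 = 0x31 and DB17 = 0x82 at this point.
         */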
4511
4512         /*
4513          * Content Type (Bits 2:0)
4514          * 000b = Not defined.
4515          * 001b = Graphics.
4516          * 010b = Photo.
4517          * 011b = Video.
4518          * 100b = Game
4519          * All other values are RESERVED.
4520          * Note: See CTA-861-G for the definition and expected
4521          * processing by a stream sink for the above content types.
4522          */
4523         vsc_sdp.db[18] = 0;
4524
4525         intel_dig_port->write_infoframe(&intel_dig_port->base,
4526                         crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4527 }
4528
4529 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4530                                const struct intel_crtc_state *crtc_state)
4531 {
4532         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4533                 return;
4534
4535         intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4536 }
4537
4538 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4539 {
4540         int status = 0;
4541         int test_link_rate;
4542         u8 test_lane_count, test_link_bw;
4543         /* (DP CTS 1.2)
4544          * 4.3.1.11
4545          */
4546         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4547         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4548                                    &test_lane_count);
4549
4550         if (status <= 0) {
4551                 DRM_DEBUG_KMS("Lane count read failed\n");
4552                 return DP_TEST_NAK;
4553         }
4554         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4555
4556         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4557                                    &test_link_bw);
4558         if (status <= 0) {
4559                 DRM_DEBUG_KMS("Link Rate read failed\n");
4560                 return DP_TEST_NAK;
4561         }
4562         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
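        /* e.g. bw code 0x14 (DP_LINK_BW_5_4) maps to 20 * 27000 = 540000 */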
4563
4564         /* Validate the requested link rate and lane count */
4565         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4566                                         test_lane_count))
4567                 return DP_TEST_NAK;
4568
4569         intel_dp->compliance.test_lane_count = test_lane_count;
4570         intel_dp->compliance.test_link_rate = test_link_rate;
4571
4572         return DP_TEST_ACK;
4573 }
4574
4575 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4576 {
4577         u8 test_pattern;
4578         u8 test_misc;
4579         __be16 h_width, v_height;
4580         int status = 0;
4581
4582         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4583         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4584                                    &test_pattern);
4585         if (status <= 0) {
4586                 DRM_DEBUG_KMS("Test pattern read failed\n");
4587                 return DP_TEST_NAK;
4588         }
4589         if (test_pattern != DP_COLOR_RAMP)
4590                 return DP_TEST_NAK;
4591
4592         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4593                                   &h_width, 2);
4594         if (status <= 0) {
4595                 DRM_DEBUG_KMS("H Width read failed\n");
4596                 return DP_TEST_NAK;
4597         }
4598
4599         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4600                                   &v_height, 2);
4601         if (status <= 0) {
4602                 DRM_DEBUG_KMS("V Height read failed\n");
4603                 return DP_TEST_NAK;
4604         }
4605
4606         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4607                                    &test_misc);
4608         if (status <= 0) {
4609                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4610                 return DP_TEST_NAK;
4611         }
4612         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4613                 return DP_TEST_NAK;
4614         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4615                 return DP_TEST_NAK;
4616         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4617         case DP_TEST_BIT_DEPTH_6:
4618                 intel_dp->compliance.test_data.bpc = 6;
4619                 break;
4620         case DP_TEST_BIT_DEPTH_8:
4621                 intel_dp->compliance.test_data.bpc = 8;
4622                 break;
4623         default:
4624                 return DP_TEST_NAK;
4625         }
4626
4627         intel_dp->compliance.test_data.video_pattern = test_pattern;
4628         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4629         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4630         /* Set test active flag here so userspace doesn't interrupt things */
4631         intel_dp->compliance.test_active = 1;
4632
4633         return DP_TEST_ACK;
4634 }
4635
4636 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4637 {
4638         u8 test_result = DP_TEST_ACK;
4639         struct intel_connector *intel_connector = intel_dp->attached_connector;
4640         struct drm_connector *connector = &intel_connector->base;
4641
4642         if (intel_connector->detect_edid == NULL ||
4643             connector->edid_corrupt ||
4644             intel_dp->aux.i2c_defer_count > 6) {
4645                 /* Check EDID read for NACKs, DEFERs and corruption
4646                  * (DP CTS 1.2 Core r1.1)
4647                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4648                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4649                  *    4.2.2.6 : EDID corruption detected
4650                  * Use failsafe mode for all cases
4651                  */
4652                 if (intel_dp->aux.i2c_nack_count > 0 ||
4653                         intel_dp->aux.i2c_defer_count > 0)
4654                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4655                                       intel_dp->aux.i2c_nack_count,
4656                                       intel_dp->aux.i2c_defer_count);
4657                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4658         } else {
4659                 struct edid *block = intel_connector->detect_edid;
4660
4661                 /* We have to write the checksum
4662                  * of the last block read
4663                  */
4664                 block += intel_connector->detect_edid->extensions;
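                /* e.g. with one extension this points at bytes 128..255 */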
4665
4666                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4667                                        block->checksum) <= 0)
4668                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4669
4670                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4671                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4672         }
4673
4674         /* Set test active flag here so userspace doesn't interrupt things */
4675         intel_dp->compliance.test_active = 1;
4676
4677         return test_result;
4678 }
4679
4680 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4681 {
4682         u8 test_result = DP_TEST_NAK;
4683         return test_result;
4684 }
4685
4686 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4687 {
4688         u8 response = DP_TEST_NAK;
4689         u8 request = 0;
4690         int status;
4691
4692         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4693         if (status <= 0) {
4694                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4695                 goto update_status;
4696         }
4697
4698         switch (request) {
4699         case DP_TEST_LINK_TRAINING:
4700                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4701                 response = intel_dp_autotest_link_training(intel_dp);
4702                 break;
4703         case DP_TEST_LINK_VIDEO_PATTERN:
4704                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4705                 response = intel_dp_autotest_video_pattern(intel_dp);
4706                 break;
4707         case DP_TEST_LINK_EDID_READ:
4708                 DRM_DEBUG_KMS("EDID test requested\n");
4709                 response = intel_dp_autotest_edid(intel_dp);
4710                 break;
4711         case DP_TEST_LINK_PHY_TEST_PATTERN:
4712                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4713                 response = intel_dp_autotest_phy_pattern(intel_dp);
4714                 break;
4715         default:
4716                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4717                 break;
4718         }
4719
4720         if (response & DP_TEST_ACK)
4721                 intel_dp->compliance.test_type = request;
4722
4723 update_status:
4724         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4725         if (status <= 0)
4726                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4727 }
4728
4729 static int
4730 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4731 {
4732         bool bret;
4733
4734         if (intel_dp->is_mst) {
4735                 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4736                 int ret = 0;
4737                 int retry;
4738                 bool handled;
4739
4740                 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4741                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4742 go_again:
4743                 if (bret) {
4744
4745                         /* check link status - esi[10] is DP_LANE0_1_STATUS_ESI (0x200c) */
4746                         if (intel_dp->active_mst_links > 0 &&
4747                             !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4748                                 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4749                                 intel_dp_start_link_train(intel_dp);
4750                                 intel_dp_stop_link_train(intel_dp);
4751                         }
4752
4753                         DRM_DEBUG_KMS("got esi %3ph\n", esi);
4754                         ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4755
4756                         if (handled) {
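                                /* ack the serviced events by writing the handled ESI bytes back */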
4757                                 for (retry = 0; retry < 3; retry++) {
4758                                         int wret;
4759                                         wret = drm_dp_dpcd_write(&intel_dp->aux,
4760                                                                  DP_SINK_COUNT_ESI+1,
4761                                                                  &esi[1], 3);
4762                                         if (wret == 3) {
4763                                                 break;
4764                                         }
4765                                 }
4766
4767                                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4768                                 if (bret) {
4769                                         DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4770                                         goto go_again;
4771                                 }
4772                         } else
4773                                 ret = 0;
4774
4775                         return ret;
4776                 } else {
4777                         DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4778                         intel_dp->is_mst = false;
4779                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4780                                                         intel_dp->is_mst);
4781                 }
4782         }
4783         return -EINVAL;
4784 }
4785
4786 static bool
4787 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4788 {
4789         u8 link_status[DP_LINK_STATUS_SIZE];
4790
4791         if (!intel_dp->link_trained)
4792                 return false;
4793
4794         /*
4795          * While the PSR source HW is enabled it controls the main link,
4796          * sending frames and enabling/disabling it, so attempting a retrain
4797          * will fail: the link may or may not be on, or training patterns
4798          * could get mixed with frame data, causing the retrain to fail.
4799          * Also, when exiting PSR, the HW retrains the link anyway, fixing
4800          * any link status error.
4801          */
4802         if (intel_psr_enabled(intel_dp))
4803                 return false;
4804
4805         if (!intel_dp_get_link_status(intel_dp, link_status))
4806                 return false;
4807
4808         /*
4809          * Validate the cached values of intel_dp->link_rate and
4810          * intel_dp->lane_count before attempting to retrain.
4811          */
4812         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4813                                         intel_dp->lane_count))
4814                 return false;
4815
4816         /* Retrain if Channel EQ or CR not ok */
4817         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4818 }
4819
4820 int intel_dp_retrain_link(struct intel_encoder *encoder,
4821                           struct drm_modeset_acquire_ctx *ctx)
4822 {
4823         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4824         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4825         struct intel_connector *connector = intel_dp->attached_connector;
4826         struct drm_connector_state *conn_state;
4827         struct intel_crtc_state *crtc_state;
4828         struct intel_crtc *crtc;
4829         int ret;
4830
4831         /* FIXME handle the MST connectors as well */
4832
4833         if (!connector || connector->base.status != connector_status_connected)
4834                 return 0;
4835
4836         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4837                                ctx);
4838         if (ret)
4839                 return ret;
4840
4841         conn_state = connector->base.state;
4842
4843         crtc = to_intel_crtc(conn_state->crtc);
4844         if (!crtc)
4845                 return 0;
4846
4847         ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4848         if (ret)
4849                 return ret;
4850
4851         crtc_state = to_intel_crtc_state(crtc->base.state);
4852
4853         WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4854
4855         if (!crtc_state->base.active)
4856                 return 0;
4857
4858         if (conn_state->commit &&
4859             !try_wait_for_completion(&conn_state->commit->hw_done))
4860                 return 0;
4861
4862         if (!intel_dp_needs_link_retrain(intel_dp))
4863                 return 0;
4864
4865         /* Suppress underruns caused by re-training */
4866         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4867         if (crtc_state->has_pch_encoder)
4868                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4869                                                       intel_crtc_pch_transcoder(crtc), false);
4870
4871         intel_dp_start_link_train(intel_dp);
4872         intel_dp_stop_link_train(intel_dp);
4873
4874         /* Keep underrun reporting disabled until things are stable */
4875         intel_wait_for_vblank(dev_priv, crtc->pipe);
4876
4877         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4878         if (crtc_state->has_pch_encoder)
4879                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4880                                                       intel_crtc_pch_transcoder(crtc), true);
4881
4882         return 0;
4883 }
4884
4885 /*
4886  * If the display is now connected, check the link status;
4887  * there have been known issues of link loss triggering a
4888  * long pulse.
4889  *
4890  * Some sinks (eg. ASUS PB287Q) seem to perform some
4891  * weird HPD ping pong during modesets. So we can apparently
4892  * end up with HPD going low during a modeset, and then
4893  * going back up soon after. And once that happens we must
4894  * retrain the link to get a picture. That's in case no
4895  * userspace component reacted to the intermittent HPD dip.
4896  */
4897 static enum intel_hotplug_state
4898 intel_dp_hotplug(struct intel_encoder *encoder,
4899                  struct intel_connector *connector,
4900                  bool irq_received)
4901 {
4902         struct drm_modeset_acquire_ctx ctx;
4903         enum intel_hotplug_state state;
4904         int ret;
4905
4906         state = intel_encoder_hotplug(encoder, connector, irq_received);
4907
4908         drm_modeset_acquire_init(&ctx, 0);
4909
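        /*
         * -EDEADLK means intel_dp_retrain_link() hit a modeset lock held
         * elsewhere; drm_modeset_backoff() drops our locks and waits until
         * it is safe to retry the whole sequence.
         */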
4910         for (;;) {
4911                 ret = intel_dp_retrain_link(encoder, &ctx);
4912
4913                 if (ret == -EDEADLK) {
4914                         drm_modeset_backoff(&ctx);
4915                         continue;
4916                 }
4917
4918                 break;
4919         }
4920
4921         drm_modeset_drop_locks(&ctx);
4922         drm_modeset_acquire_fini(&ctx);
4923         WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4924
4925         /*
4926          * Keeping it consistent with intel_ddi_hotplug() and
4927          * intel_hdmi_hotplug().
4928          */
4929         if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
4930                 state = INTEL_HOTPLUG_RETRY;
4931
4932         return state;
4933 }
4934
4935 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4936 {
4937         u8 val;
4938
4939         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4940                 return;
4941
4942         if (drm_dp_dpcd_readb(&intel_dp->aux,
4943                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4944                 return;
4945
4946         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4947
4948         if (val & DP_AUTOMATED_TEST_REQUEST)
4949                 intel_dp_handle_test_request(intel_dp);
4950
4951         if (val & DP_CP_IRQ)
4952                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4953
4954         if (val & DP_SINK_SPECIFIC_IRQ)
4955                 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4956 }
4957
4958 /*
4959  * According to DP spec
4960  * 5.1.2:
4961  *  1. Read DPCD
4962  *  2. Configure link according to Receiver Capabilities
4963  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4964  *  4. Check link status on receipt of hot-plug interrupt
4965  *
4966  * intel_dp_short_pulse -  handles short pulse interrupts
4967  * when full detection is not required.
4968  * Returns %true if short pulse is handled and full detection
4969  * is NOT required and %false otherwise.
4970  */
4971 static bool
4972 intel_dp_short_pulse(struct intel_dp *intel_dp)
4973 {
4974         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4975         u8 old_sink_count = intel_dp->sink_count;
4976         bool ret;
4977
4978         /*
4979          * Clear the compliance test variables to allow capturing
4980          * the values for the next automated test request.
4981          */
4982         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4983
4984         /*
4985          * Now read the DPCD to see if it's actually running.
4986          * If the current value of sink count doesn't match the
4987          * value that was stored earlier, or the DPCD read failed,
4988          * we need to do full detection.
4989          */
4990         ret = intel_dp_get_dpcd(intel_dp);
4991
4992         if ((old_sink_count != intel_dp->sink_count) || !ret) {
4993                 /* No need to proceed if we are going to do full detect */
4994                 return false;
4995         }
4996
4997         intel_dp_check_service_irq(intel_dp);
4998
4999         /* Handle CEC interrupts, if any */
5000         drm_dp_cec_irq(&intel_dp->aux);
5001
5002         /* defer to the hotplug work for link retraining if needed */
5003         if (intel_dp_needs_link_retrain(intel_dp))
5004                 return false;
5005
5006         intel_psr_short_pulse(intel_dp);
5007
5008         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
5009                 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
5010                 /* Send a Hotplug Uevent to userspace to start modeset */
5011                 drm_kms_helper_hotplug_event(&dev_priv->drm);
5012         }
5013
5014         return true;
5015 }
5016
5017 /* XXX this is probably wrong for multiple downstream ports */
5018 static enum drm_connector_status
5019 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5020 {
5021         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5022         u8 *dpcd = intel_dp->dpcd;
5023         u8 type;
5024
5025         if (WARN_ON(intel_dp_is_edp(intel_dp)))
5026                 return connector_status_connected;
5027
5028         if (lspcon->active)
5029                 lspcon_resume(lspcon);
5030
5031         if (!intel_dp_get_dpcd(intel_dp))
5032                 return connector_status_disconnected;
5033
5034         /* if there's no downstream port, we're done */
5035         if (!drm_dp_is_branch(dpcd))
5036                 return connector_status_connected;
5037
5038         /* If we're HPD-aware, SINK_COUNT changes dynamically */
5039         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5040             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5041                 return intel_dp->sink_count ?
5042                         connector_status_connected :
5043                         connector_status_disconnected;
5044         }
5045
5046         if (intel_dp_can_mst(intel_dp))
5047                 return connector_status_connected;
5048
5049         /* If no HPD, poke DDC gently */
5050         if (drm_probe_ddc(&intel_dp->aux.ddc))
5051                 return connector_status_connected;
5052
5053         /* Well we tried, say unknown for unreliable port types */
5054         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5055                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5056                 if (type == DP_DS_PORT_TYPE_VGA ||
5057                     type == DP_DS_PORT_TYPE_NON_EDID)
5058                         return connector_status_unknown;
5059         } else {
5060                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5061                         DP_DWN_STRM_PORT_TYPE_MASK;
5062                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5063                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5064                         return connector_status_unknown;
5065         }
5066
5067         /* Anything else is out of spec, warn and ignore */
5068         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5069         return connector_status_disconnected;
5070 }
5071
5072 static enum drm_connector_status
5073 edp_detect(struct intel_dp *intel_dp)
5074 {
5075         return connector_status_connected;
5076 }
5077
5078 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5079 {
5080         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5081         u32 bit;
5082
5083         switch (encoder->hpd_pin) {
5084         case HPD_PORT_B:
5085                 bit = SDE_PORTB_HOTPLUG;
5086                 break;
5087         case HPD_PORT_C:
5088                 bit = SDE_PORTC_HOTPLUG;
5089                 break;
5090         case HPD_PORT_D:
5091                 bit = SDE_PORTD_HOTPLUG;
5092                 break;
5093         default:
5094                 MISSING_CASE(encoder->hpd_pin);
5095                 return false;
5096         }
5097
5098         return I915_READ(SDEISR) & bit;
5099 }
5100
5101 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5102 {
5103         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5104         u32 bit;
5105
5106         switch (encoder->hpd_pin) {
5107         case HPD_PORT_B:
5108                 bit = SDE_PORTB_HOTPLUG_CPT;
5109                 break;
5110         case HPD_PORT_C:
5111                 bit = SDE_PORTC_HOTPLUG_CPT;
5112                 break;
5113         case HPD_PORT_D:
5114                 bit = SDE_PORTD_HOTPLUG_CPT;
5115                 break;
5116         default:
5117                 MISSING_CASE(encoder->hpd_pin);
5118                 return false;
5119         }
5120
5121         return I915_READ(SDEISR) & bit;
5122 }
5123
5124 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5125 {
5126         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5127         u32 bit;
5128
5129         switch (encoder->hpd_pin) {
5130         case HPD_PORT_A:
5131                 bit = SDE_PORTA_HOTPLUG_SPT;
5132                 break;
5133         case HPD_PORT_E:
5134                 bit = SDE_PORTE_HOTPLUG_SPT;
5135                 break;
5136         default:
5137                 return cpt_digital_port_connected(encoder);
5138         }
5139
5140         return I915_READ(SDEISR) & bit;
5141 }
5142
5143 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5144 {
5145         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5146         u32 bit;
5147
5148         switch (encoder->hpd_pin) {
5149         case HPD_PORT_B:
5150                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5151                 break;
5152         case HPD_PORT_C:
5153                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5154                 break;
5155         case HPD_PORT_D:
5156                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5157                 break;
5158         default:
5159                 MISSING_CASE(encoder->hpd_pin);
5160                 return false;
5161         }
5162
5163         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5164 }
5165
5166 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5167 {
5168         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5169         u32 bit;
5170
5171         switch (encoder->hpd_pin) {
5172         case HPD_PORT_B:
5173                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5174                 break;
5175         case HPD_PORT_C:
5176                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5177                 break;
5178         case HPD_PORT_D:
5179                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5180                 break;
5181         default:
5182                 MISSING_CASE(encoder->hpd_pin);
5183                 return false;
5184         }
5185
5186         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5187 }
5188
5189 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5190 {
5191         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5192
5193         if (encoder->hpd_pin == HPD_PORT_A)
5194                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5195         else
5196                 return ibx_digital_port_connected(encoder);
5197 }
5198
5199 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5200 {
5201         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5202
5203         if (encoder->hpd_pin == HPD_PORT_A)
5204                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5205         else
5206                 return cpt_digital_port_connected(encoder);
5207 }
5208
5209 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5210 {
5211         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5212
5213         if (encoder->hpd_pin == HPD_PORT_A)
5214                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5215         else
5216                 return cpt_digital_port_connected(encoder);
5217 }
5218
5219 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5220 {
5221         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5222
5223         if (encoder->hpd_pin == HPD_PORT_A)
5224                 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5225         else
5226                 return cpt_digital_port_connected(encoder);
5227 }
5228
5229 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5230 {
5231         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5232         u32 bit;
5233
5234         switch (encoder->hpd_pin) {
5235         case HPD_PORT_A:
5236                 bit = BXT_DE_PORT_HP_DDIA;
5237                 break;
5238         case HPD_PORT_B:
5239                 bit = BXT_DE_PORT_HP_DDIB;
5240                 break;
5241         case HPD_PORT_C:
5242                 bit = BXT_DE_PORT_HP_DDIC;
5243                 break;
5244         default:
5245                 MISSING_CASE(encoder->hpd_pin);
5246                 return false;
5247         }
5248
5249         return I915_READ(GEN8_DE_PORT_ISR) & bit;
5250 }
5251
5252 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5253                                      struct intel_digital_port *intel_dig_port)
5254 {
5255         enum port port = intel_dig_port->base.port;
5256
5257         return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5258 }
5259
5260 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5261 {
5262         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5263         struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5264         enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5265
5266         if (intel_phy_is_combo(dev_priv, phy))
5267                 return icl_combo_port_connected(dev_priv, dig_port);
5268         else if (intel_phy_is_tc(dev_priv, phy))
5269                 return intel_tc_port_connected(dig_port);
5270         else
5271                 MISSING_CASE(encoder->hpd_pin);
5272
5273         return false;
5274 }
5275
5276 /*
5277  * intel_digital_port_connected - is the specified port connected?
5278  * @encoder: intel_encoder
5279  *
5280  * In cases where there's a connector physically connected but it can't be used
5281  * by our hardware we also return false, since the rest of the driver should
5282  * pretty much treat the port as disconnected. This is relevant for type-C
5283  * (starting on ICL) where there's ownership involved.
5284  *
5285  * Return %true if port is connected, %false otherwise.
5286  */
5287 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5288 {
5289         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5290
5291         if (HAS_GMCH(dev_priv)) {
5292                 if (IS_GM45(dev_priv))
5293                         return gm45_digital_port_connected(encoder);
5294                 else
5295                         return g4x_digital_port_connected(encoder);
5296         }
5297
5298         if (INTEL_GEN(dev_priv) >= 11)
5299                 return icl_digital_port_connected(encoder);
5300         else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5301                 return spt_digital_port_connected(encoder);
5302         else if (IS_GEN9_LP(dev_priv))
5303                 return bxt_digital_port_connected(encoder);
5304         else if (IS_GEN(dev_priv, 8))
5305                 return bdw_digital_port_connected(encoder);
5306         else if (IS_GEN(dev_priv, 7))
5307                 return ivb_digital_port_connected(encoder);
5308         else if (IS_GEN(dev_priv, 6))
5309                 return snb_digital_port_connected(encoder);
5310         else if (IS_GEN(dev_priv, 5))
5311                 return ilk_digital_port_connected(encoder);
5312
5313         MISSING_CASE(INTEL_GEN(dev_priv));
5314         return false;
5315 }
5316
5317 bool intel_digital_port_connected(struct intel_encoder *encoder)
5318 {
5319         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5320         bool is_connected = false;
5321         intel_wakeref_t wakeref;
5322
5323         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5324                 is_connected = __intel_digital_port_connected(encoder);
5325
5326         return is_connected;
5327 }
5328
5329 static struct edid *
5330 intel_dp_get_edid(struct intel_dp *intel_dp)
5331 {
5332         struct intel_connector *intel_connector = intel_dp->attached_connector;
5333
5334         /* use cached edid if we have one */
5335         if (intel_connector->edid) {
5336                 /* invalid edid */
5337                 if (IS_ERR(intel_connector->edid))
5338                         return NULL;
5339
5340                 return drm_edid_duplicate(intel_connector->edid);
5341         } else
5342                 return drm_get_edid(&intel_connector->base,
5343                                     &intel_dp->aux.ddc);
5344 }
5345
5346 static void
5347 intel_dp_set_edid(struct intel_dp *intel_dp)
5348 {
5349         struct intel_connector *intel_connector = intel_dp->attached_connector;
5350         struct edid *edid;
5351
5352         intel_dp_unset_edid(intel_dp);
5353         edid = intel_dp_get_edid(intel_dp);
5354         intel_connector->detect_edid = edid;
5355
5356         intel_dp->has_audio = drm_detect_monitor_audio(edid);
5357         drm_dp_cec_set_edid(&intel_dp->aux, edid);
5358 }
5359
5360 static void
5361 intel_dp_unset_edid(struct intel_dp *intel_dp)
5362 {
5363         struct intel_connector *intel_connector = intel_dp->attached_connector;
5364
5365         drm_dp_cec_unset_edid(&intel_dp->aux);
5366         kfree(intel_connector->detect_edid);
5367         intel_connector->detect_edid = NULL;
5368
5369         intel_dp->has_audio = false;
5370 }
5371
5372 static int
5373 intel_dp_detect(struct drm_connector *connector,
5374                 struct drm_modeset_acquire_ctx *ctx,
5375                 bool force)
5376 {
5377         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5378         struct intel_dp *intel_dp = intel_attached_dp(connector);
5379         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5380         struct intel_encoder *encoder = &dig_port->base;
5381         enum drm_connector_status status;
5382
5383         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5384                       connector->base.id, connector->name);
5385         WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5386
5387         /* Can't disconnect eDP */
5388         if (intel_dp_is_edp(intel_dp))
5389                 status = edp_detect(intel_dp);
5390         else if (intel_digital_port_connected(encoder))
5391                 status = intel_dp_detect_dpcd(intel_dp);
5392         else
5393                 status = connector_status_disconnected;
5394
5395         if (status == connector_status_disconnected) {
5396                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5397                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5398
5399                 if (intel_dp->is_mst) {
5400                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5401                                       intel_dp->is_mst,
5402                                       intel_dp->mst_mgr.mst_state);
5403                         intel_dp->is_mst = false;
5404                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5405                                                         intel_dp->is_mst);
5406                 }
5407
5408                 goto out;
5409         }
5410
5411         if (intel_dp->reset_link_params) {
5412                 /* Initial max link lane count */
5413                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5414
5415                 /* Initial max link rate */
5416                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5417
5418                 intel_dp->reset_link_params = false;
5419         }
5420
5421         intel_dp_print_rates(intel_dp);
5422
5423         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5424         if (INTEL_GEN(dev_priv) >= 11)
5425                 intel_dp_get_dsc_sink_cap(intel_dp);
5426
5427         intel_dp_configure_mst(intel_dp);
5428
5429         if (intel_dp->is_mst) {
5430                 /*
5431                  * If we are in MST mode then this connector
5432                  * won't appear connected or have an EDID
5433                  * attached to it.
5434                  */
5435                 status = connector_status_disconnected;
5436                 goto out;
5437         }
5438
5439         /*
5440          * Some external monitors do not signal loss of link synchronization
5441          * with an IRQ_HPD, so force a link status check.
5442          */
5443         if (!intel_dp_is_edp(intel_dp)) {
5444                 int ret;
5445
5446                 ret = intel_dp_retrain_link(encoder, ctx);
5447                 if (ret)
5448                         return ret;
5449         }
5450
5451         /*
5452          * Clear the NACK and defer counts so we get their exact values
5453          * while reading the EDID, as required by compliance tests
5454          * 4.2.2.4 and 4.2.2.5.
5455          */
5456         intel_dp->aux.i2c_nack_count = 0;
5457         intel_dp->aux.i2c_defer_count = 0;
5458
5459         intel_dp_set_edid(intel_dp);
5460         if (intel_dp_is_edp(intel_dp) ||
5461             to_intel_connector(connector)->detect_edid)
5462                 status = connector_status_connected;
5463
5464         intel_dp_check_service_irq(intel_dp);
5465
5466 out:
5467         if (status != connector_status_connected && !intel_dp->is_mst)
5468                 intel_dp_unset_edid(intel_dp);
5469
5470         return status;
5471 }
5472
5473 static void
5474 intel_dp_force(struct drm_connector *connector)
5475 {
5476         struct intel_dp *intel_dp = intel_attached_dp(connector);
5477         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5478         struct intel_encoder *intel_encoder = &dig_port->base;
5479         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5480         enum intel_display_power_domain aux_domain =
5481                 intel_aux_power_domain(dig_port);
5482         intel_wakeref_t wakeref;
5483
5484         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5485                       connector->base.id, connector->name);
5486         intel_dp_unset_edid(intel_dp);
5487
5488         if (connector->status != connector_status_connected)
5489                 return;
5490
5491         wakeref = intel_display_power_get(dev_priv, aux_domain);
5492
5493         intel_dp_set_edid(intel_dp);
5494
5495         intel_display_power_put(dev_priv, aux_domain, wakeref);
5496 }
5497
5498 static int intel_dp_get_modes(struct drm_connector *connector)
5499 {
5500         struct intel_connector *intel_connector = to_intel_connector(connector);
5501         struct edid *edid;
5502
5503         edid = intel_connector->detect_edid;
5504         if (edid) {
5505                 int ret = intel_connector_update_modes(connector, edid);
5506                 if (ret)
5507                         return ret;
5508         }
5509
5510         /* if eDP has no EDID, fall back to fixed mode */
5511         if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5512             intel_connector->panel.fixed_mode) {
5513                 struct drm_display_mode *mode;
5514
5515                 mode = drm_mode_duplicate(connector->dev,
5516                                           intel_connector->panel.fixed_mode);
5517                 if (mode) {
5518                         drm_mode_probed_add(connector, mode);
5519                         return 1;
5520                 }
5521         }
5522
5523         return 0;
5524 }
5525
5526 static int
5527 intel_dp_connector_register(struct drm_connector *connector)
5528 {
5529         struct intel_dp *intel_dp = intel_attached_dp(connector);
5530         struct drm_device *dev = connector->dev;
5531         int ret;
5532
5533         ret = intel_connector_register(connector);
5534         if (ret)
5535                 return ret;
5536
5537         i915_debugfs_connector_add(connector);
5538
5539         DRM_DEBUG_KMS("registering %s bus for %s\n",
5540                       intel_dp->aux.name, connector->kdev->kobj.name);
5541
5542         intel_dp->aux.dev = connector->kdev;
5543         ret = drm_dp_aux_register(&intel_dp->aux);
5544         if (!ret)
5545                 drm_dp_cec_register_connector(&intel_dp->aux,
5546                                               connector->name, dev->dev);
5547         return ret;
5548 }
5549
5550 static void
5551 intel_dp_connector_unregister(struct drm_connector *connector)
5552 {
5553         struct intel_dp *intel_dp = intel_attached_dp(connector);
5554
5555         drm_dp_cec_unregister_connector(&intel_dp->aux);
5556         drm_dp_aux_unregister(&intel_dp->aux);
5557         intel_connector_unregister(connector);
5558 }
5559
5560 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5561 {
5562         struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5563         struct intel_dp *intel_dp = &intel_dig_port->dp;
5564
5565         intel_dp_mst_encoder_cleanup(intel_dig_port);
5566         if (intel_dp_is_edp(intel_dp)) {
5567                 intel_wakeref_t wakeref;
5568
5569                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5570                 /*
5571                  * vdd might still be enabled due to the delayed vdd off.
5572                  * Make sure vdd is actually turned off here.
5573                  */
5574                 with_pps_lock(intel_dp, wakeref)
5575                         edp_panel_vdd_off_sync(intel_dp);
5576
5577                 if (intel_dp->edp_notifier.notifier_call) {
5578                         unregister_reboot_notifier(&intel_dp->edp_notifier);
5579                         intel_dp->edp_notifier.notifier_call = NULL;
5580                 }
5581         }
5582
5583         intel_dp_aux_fini(intel_dp);
5584 }
5585
5586 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5587 {
5588         intel_dp_encoder_flush_work(encoder);
5589
5590         drm_encoder_cleanup(encoder);
5591         kfree(enc_to_dig_port(encoder));
5592 }
5593
5594 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5595 {
5596         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5597         intel_wakeref_t wakeref;
5598
5599         if (!intel_dp_is_edp(intel_dp))
5600                 return;
5601
5602         /*
5603          * vdd might still be enabled due to the delayed vdd off.
5604          * Make sure vdd is actually turned off here.
5605          */
5606         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5607         with_pps_lock(intel_dp, wakeref)
5608                 edp_panel_vdd_off_sync(intel_dp);
5609 }
5610
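/*
 * Wait for a CP_IRQ to arrive, i.e. for hdcp->cp_irq_count to advance
 * past the value cached before the HDCP message was written. A minimal
 * sketch of the pattern (names abbreviated for illustration):
 *
 *	cached = atomic_read(&hdcp->cp_irq_count);
 *	... write HDCP message over DPCD ...
 *	wait_event_interruptible_timeout(hdcp->cp_irq_queue,
 *					 cached != atomic_read(&hdcp->cp_irq_count),
 *					 msecs_to_jiffies(timeout));
 *
 * A timeout is only logged; callers re-check message availability
 * themselves.
 */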
5611 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5612 {
5613         long ret;
5614
5615 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5616         ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5617                                                msecs_to_jiffies(timeout));
5618
5619         if (!ret)
5620                 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5621 }
5622
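/*
 * Write An over DPCD and then trigger the hardware-assisted Aksv write.
 * Only the AUX header is composed in software; the
 * DP_AUX_CH_CTL_AUX_AKSV_SELECT flag makes the hardware substitute the
 * (software-invisible) Aksv key as the payload.
 */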
5623 static
5624 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5625                                 u8 *an)
5626 {
5627         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5628         static const struct drm_dp_aux_msg msg = {
5629                 .request = DP_AUX_NATIVE_WRITE,
5630                 .address = DP_AUX_HDCP_AKSV,
5631                 .size = DRM_HDCP_KSV_LEN,
5632         };
5633         u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5634         ssize_t dpcd_ret;
5635         int ret;
5636
5637         /* Output An first, that's easy */
5638         dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5639                                      an, DRM_HDCP_AN_LEN);
5640         if (dpcd_ret != DRM_HDCP_AN_LEN) {
5641                 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5642                               dpcd_ret);
5643                 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5644         }
5645
5646         /*
5647          * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5648          * order to get it on the wire, we need to create the AUX header as if
5649          * we were writing the data, and then tickle the hardware to output the
5650          * data once the header is sent out.
5651          */
5652         intel_dp_aux_header(txbuf, &msg);
5653
5654         ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5655                                 rxbuf, sizeof(rxbuf),
5656                                 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5657         if (ret < 0) {
5658                 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5659                 return ret;
5660         } else if (ret == 0) {
5661                 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5662                 return -EIO;
5663         }
5664
5665         reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5666         if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5667                 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5668                               reply);
5669                 return -EIO;
5670         }
5671         return 0;
5672 }
5673
5674 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5675                                    u8 *bksv)
5676 {
5677         ssize_t ret;
5678         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5679                                DRM_HDCP_KSV_LEN);
5680         if (ret != DRM_HDCP_KSV_LEN) {
5681                 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5682                 return ret >= 0 ? -EIO : ret;
5683         }
5684         return 0;
5685 }
5686
5687 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5688                                       u8 *bstatus)
5689 {
5690         ssize_t ret;
5691         /*
5692          * For some reason the HDMI and DP HDCP specs call this register
5693          * definition by different names. In the HDMI spec, it's called BSTATUS,
5694          * but in DP it's called BINFO.
5695          */
5696         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5697                                bstatus, DRM_HDCP_BSTATUS_LEN);
5698         if (ret != DRM_HDCP_BSTATUS_LEN) {
5699                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5700                 return ret >= 0 ? -EIO : ret;
5701         }
5702         return 0;
5703 }
5704
5705 static
5706 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5707                              u8 *bcaps)
5708 {
5709         ssize_t ret;
5710
5711         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5712                                bcaps, 1);
5713         if (ret != 1) {
5714                 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5715                 return ret >= 0 ? -EIO : ret;
5716         }
5717
5718         return 0;
5719 }
5720
5721 static
5722 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5723                                    bool *repeater_present)
5724 {
5725         ssize_t ret;
5726         u8 bcaps;
5727
5728         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5729         if (ret)
5730                 return ret;
5731
5732         *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5733         return 0;
5734 }
5735
5736 static
5737 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5738                                 u8 *ri_prime)
5739 {
5740         ssize_t ret;
5741         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5742                                ri_prime, DRM_HDCP_RI_LEN);
5743         if (ret != DRM_HDCP_RI_LEN) {
5744                 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5745                 return ret >= 0 ? -EIO : ret;
5746         }
5747         return 0;
5748 }
5749
5750 static
5751 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5752                                  bool *ksv_ready)
5753 {
5754         ssize_t ret;
5755         u8 bstatus;
5756         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5757                                &bstatus, 1);
5758         if (ret != 1) {
5759                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5760                 return ret >= 0 ? -EIO : ret;
5761         }
5762         *ksv_ready = bstatus & DP_BSTATUS_READY;
5763         return 0;
5764 }
5765
5766 static
5767 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5768                                 int num_downstream, u8 *ksv_fifo)
5769 {
5770         ssize_t ret;
5771         int i;
5772
5773         /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5774         for (i = 0; i < num_downstream; i += 3) {
5775                 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5776                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5777                                        DP_AUX_HDCP_KSV_FIFO,
5778                                        ksv_fifo + i * DRM_HDCP_KSV_LEN,
5779                                        len);
5780                 if (ret != len) {
5781                         DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5782                                       i, ret);
5783                         return ret >= 0 ? -EIO : ret;
5784                 }
5785         }
5786         return 0;
5787 }
5788
5789 static
5790 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5791                                     int i, u32 *part)
5792 {
5793         ssize_t ret;
5794
5795         if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5796                 return -EINVAL;
5797
5798         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5799                                DP_AUX_HDCP_V_PRIME(i), part,
5800                                DRM_HDCP_V_PRIME_PART_LEN);
5801         if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5802                 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5803                 return ret >= 0 ? -EIO : ret;
5804         }
5805         return 0;
5806 }
5807
5808 static
5809 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5810                                     bool enable)
5811 {
5812         /* Not used for single stream DisplayPort setups */
5813         return 0;
5814 }
5815
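/* The link is good unless the sink reports a failure or wants reauth. */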
5816 static
5817 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5818 {
5819         ssize_t ret;
5820         u8 bstatus;
5821
5822         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5823                                &bstatus, 1);
5824         if (ret != 1) {
5825                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5826                 return false;
5827         }
5828
5829         return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5830 }
5831
5832 static
5833 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5834                           bool *hdcp_capable)
5835 {
5836         ssize_t ret;
5837         u8 bcaps;
5838
5839         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5840         if (ret)
5841                 return ret;
5842
5843         *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5844         return 0;
5845 }
5846
5847 struct hdcp2_dp_errata_stream_type {
5848         u8      msg_id;
5849         u8      stream_type;
5850 } __packed;
5851
5852 struct hdcp2_dp_msg_data {
5853         u8 msg_id;
5854         u32 offset;
5855         bool msg_detectable;
5856         u32 timeout;
5857         u32 timeout2; /* Added for non_paired situation */
5858 };
5859
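/*
 * Per-message DPCD layout for HDCP 2.2 over DP: the DPCD offset each
 * message lives at, whether its arrival can be detected via RXSTATUS
 * (and hence CP_IRQ), and the spec timeout(s) to wait for it. timeout2
 * is only used for AKE_SEND_HPRIME in the un-paired case.
 */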
5860 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
5861         { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
5862         { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5863           false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
5864         { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5865           false, 0, 0 },
5866         { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5867           false, 0, 0 },
5868         { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5869           true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5870           HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
5871         { HDCP_2_2_AKE_SEND_PAIRING_INFO,
5872           DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5873           HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
5874         { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
5875         { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5876           false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
5877         { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5878           0, 0 },
5879         { HDCP_2_2_REP_SEND_RECVID_LIST,
5880           DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5881           HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
5882         { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5883           0, 0 },
5884         { HDCP_2_2_REP_STREAM_MANAGE,
5885           DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5886           0, 0 },
5887         { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5888           false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
5889 /* local define to shovel this through the write_2_2 interface */
5890 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE  50
5891         { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5892           DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5893           0, 0 },
5894 };
5895
5896 static inline
5897 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5898                                   u8 *rx_status)
5899 {
5900         ssize_t ret;
5901
5902         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5903                                DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5904                                HDCP_2_2_DP_RXSTATUS_LEN);
5905         if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5906                 DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
5907                 return ret >= 0 ? -EIO : ret;
5908         }
5909
5910         return 0;
5911 }
5912
5913 static
5914 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5915                                   u8 msg_id, bool *msg_ready)
5916 {
5917         u8 rx_status;
5918         int ret;
5919
5920         *msg_ready = false;
5921         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5922         if (ret < 0)
5923                 return ret;
5924
5925         switch (msg_id) {
5926         case HDCP_2_2_AKE_SEND_HPRIME:
5927                 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5928                         *msg_ready = true;
5929                 break;
5930         case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5931                 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5932                         *msg_ready = true;
5933                 break;
5934         case HDCP_2_2_REP_SEND_RECVID_LIST:
5935                 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5936                         *msg_ready = true;
5937                 break;
5938         default:
5939                 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5940                 return -EINVAL;
5941         }
5942
5943         return 0;
5944 }
5945
5946 static ssize_t
5947 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5948                             const struct hdcp2_dp_msg_data *hdcp2_msg_data)
5949 {
5950         struct intel_dp *dp = &intel_dig_port->dp;
5951         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5952         u8 msg_id = hdcp2_msg_data->msg_id;
5953         int ret, timeout;
5954         bool msg_ready = false;
5955
5956         if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5957                 timeout = hdcp2_msg_data->timeout2;
5958         else
5959                 timeout = hdcp2_msg_data->timeout;
5960
5961         /*
5962          * There is no way to detect the CERT, LPRIME and STREAM_READY
5963          * availability. So wait for the timeout and then read the msg.
5964          */
5965         if (!hdcp2_msg_data->msg_detectable) {
5966                 mdelay(timeout);
5967                 ret = 0;
5968         } else {
5969                 /*
5970                  * Since we want to check the msg availability at the timeout,
5971                  * we ignore the timeout of the CP_IRQ wait itself.
5972                  */
5973                 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
5974                 ret = hdcp2_detect_msg_availability(intel_dig_port,
5975                                                     msg_id, &msg_ready);
5976                 if (!msg_ready)
5977                         ret = -ETIMEDOUT;
5978         }
5979
5980         if (ret)
5981                 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
5982                               hdcp2_msg_data->msg_id, ret, timeout);
5983
5984         return ret;
5985 }
5986
5987 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
5988 {
5989         int i;
5990
5991         for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
5992                 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
5993                         return &hdcp2_dp_msg_data[i];
5994
5995         return NULL;
5996 }
5997
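/*
 * Write an HDCP 2.2 message over DPCD. The first byte of @buf is the
 * msg_id, used only to look up the DPCD offset; it is not sent on the
 * wire. The payload is written in DP_AUX_MAX_PAYLOAD_BYTES sized chunks.
 */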
5998 static
5999 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6000                              void *buf, size_t size)
6001 {
6002         struct intel_dp *dp = &intel_dig_port->dp;
6003         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6004         unsigned int offset;
6005         u8 *byte = buf;
6006         ssize_t ret, bytes_to_write, len;
6007         const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6008
6009         hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6010         if (!hdcp2_msg_data)
6011                 return -EINVAL;
6012
6013         offset = hdcp2_msg_data->offset;
6014
6015         /* No msg_id in DP HDCP2.2 msgs */
6016         bytes_to_write = size - 1;
6017         byte++;
6018
6019         hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6020
6021         while (bytes_to_write) {
6022                 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6023                                 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6024
6025                 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6026                                         offset, (void *)byte, len);
6027                 if (ret < 0)
6028                         return ret;
6029
6030                 bytes_to_write -= ret;
6031                 byte += ret;
6032                 offset += ret;
6033         }
6034
6035         return size;
6036 }
6037
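/*
 * REP_SEND_RECVID_LIST is variable length: read RXINFO, extract the
 * downstream device count and size the message as the fixed part plus
 * dev_cnt receiver IDs. For example, a hypothetical three-device
 * topology would yield
 *	sizeof(struct hdcp2_rep_send_receiverid_list) -
 *	HDCP_2_2_RECEIVER_IDS_MAX_LEN + 3 * HDCP_2_2_RECEIVER_ID_LEN
 * bytes.
 */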
6038 static
6039 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6040 {
6041         u8 rx_info[HDCP_2_2_RXINFO_LEN];
6042         u32 dev_cnt;
6043         ssize_t ret;
6044
6045         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6046                                DP_HDCP_2_2_REG_RXINFO_OFFSET,
6047                                (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6048         if (ret != HDCP_2_2_RXINFO_LEN)
6049                 return ret >= 0 ? -EIO : ret;
6050
6051         dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6052                    HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6053
6054         if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6055                 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6056
6057         ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6058                 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6059                 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6060
6061         return ret;
6062 }
6063
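/*
 * Read an HDCP 2.2 message over DPCD: wait until it should be available,
 * read it back in DP_AUX_MAX_PAYLOAD_BYTES sized chunks, and re-insert
 * the msg_id as the first byte of @buf, since the DP adaptation does not
 * carry it on the wire.
 */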
6064 static
6065 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6066                             u8 msg_id, void *buf, size_t size)
6067 {
6068         unsigned int offset;
6069         u8 *byte = buf;
6070         ssize_t ret, bytes_to_recv, len;
6071         const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6072
6073         hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6074         if (!hdcp2_msg_data)
6075                 return -EINVAL;
6076         offset = hdcp2_msg_data->offset;
6077
6078         ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6079         if (ret < 0)
6080                 return ret;
6081
6082         if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6083                 ret = get_receiver_id_list_size(intel_dig_port);
6084                 if (ret < 0)
6085                         return ret;
6086
6087                 size = ret;
6088         }
6089         bytes_to_recv = size - 1;
6090
6091         /* DP adaptation msgs have no msg_id */
6092         byte++;
6093
6094         while (bytes_to_recv) {
6095                 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6096                       DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6097
6098                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6099                                        (void *)byte, len);
6100                 if (ret < 0) {
6101                         DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6102                         return ret;
6103                 }
6104
6105                 bytes_to_recv -= ret;
6106                 byte += ret;
6107                 offset += ret;
6108         }
6109         byte = buf;
6110         *byte = msg_id;
6111
6112         return size;
6113 }
6114
6115 static
6116 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6117                                       bool is_repeater, u8 content_type)
6118 {
6119         struct hdcp2_dp_errata_stream_type stream_type_msg;
6120
6121         if (is_repeater)
6122                 return 0;
6123
6124         /*
6125          * Errata for DP: Since the stream type is used for encryption, the
6126          * receiver must be told the stream type so it can decrypt the
6127          * content.
6128          * A repeater is told the stream type as part of its authentication
6129          * later on.
6130          */
6131         stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6132         stream_type_msg.stream_type = content_type;
6133
6134         return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6135                                         sizeof(stream_type_msg));
6136 }
6137
6138 static
6139 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6140 {
6141         u8 rx_status;
6142         int ret;
6143
6144         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6145         if (ret)
6146                 return ret;
6147
6148         if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6149                 ret = HDCP_REAUTH_REQUEST;
6150         else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6151                 ret = HDCP_LINK_INTEGRITY_FAILURE;
6152         else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6153                 ret = HDCP_TOPOLOGY_CHANGE;
6154
6155         return ret;
6156 }
6157
6158 static
6159 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6160                            bool *capable)
6161 {
6162         u8 rx_caps[3];
6163         int ret;
6164
6165         *capable = false;
6166         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6167                                DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6168                                rx_caps, HDCP_2_2_RXCAPS_LEN);
6169         if (ret != HDCP_2_2_RXCAPS_LEN)
6170                 return ret >= 0 ? -EIO : ret;
6171
6172         if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6173             HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6174                 *capable = true;
6175
6176         return 0;
6177 }
6178
6179 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6180         .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6181         .read_bksv = intel_dp_hdcp_read_bksv,
6182         .read_bstatus = intel_dp_hdcp_read_bstatus,
6183         .repeater_present = intel_dp_hdcp_repeater_present,
6184         .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6185         .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6186         .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6187         .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6188         .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6189         .check_link = intel_dp_hdcp_check_link,
6190         .hdcp_capable = intel_dp_hdcp_capable,
6191         .write_2_2_msg = intel_dp_hdcp2_write_msg,
6192         .read_2_2_msg = intel_dp_hdcp2_read_msg,
6193         .config_stream_type = intel_dp_hdcp2_config_stream_type,
6194         .check_2_2_link = intel_dp_hdcp2_check_link,
6195         .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6196         .protocol = HDCP_PROTOCOL_DP,
6197 };
6198
6199 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6200 {
6201         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6202         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6203
6204         lockdep_assert_held(&dev_priv->pps_mutex);
6205
6206         if (!edp_have_panel_vdd(intel_dp))
6207                 return;
6208
6209         /*
6210          * The VDD bit needs a power domain reference, so if the bit is
6211          * already enabled when we boot or resume, grab this reference and
6212          * schedule a vdd off, so we don't hold on to the reference
6213          * indefinitely.
6214          */
6215         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6216         intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6217
6218         edp_panel_vdd_schedule_off(intel_dp);
6219 }
6220
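/* Find the pipe currently driving this DP port on VLV/CHV, if any. */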
6221 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6222 {
6223         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6224         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6225         enum pipe pipe;
6226
6227         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6228                                   encoder->port, &pipe))
6229                 return pipe;
6230
6231         return INVALID_PIPE;
6232 }
6233
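/*
 * drm_encoder_funcs .reset hook, called from drm_mode_config_reset()
 * (e.g. across suspend/resume): resynchronize our software state with
 * whatever the BIOS or the hardware is actually doing.
 */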
6234 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6235 {
6236         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6237         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6238         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6239         intel_wakeref_t wakeref;
6240
6241         if (!HAS_DDI(dev_priv))
6242                 intel_dp->DP = I915_READ(intel_dp->output_reg);
6243
6244         if (lspcon->active)
6245                 lspcon_resume(lspcon);
6246
6247         intel_dp->reset_link_params = true;
6248
6249         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6250             !intel_dp_is_edp(intel_dp))
6251                 return;
6252
6253         with_pps_lock(intel_dp, wakeref) {
6254                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6255                         intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6256
6257                 if (intel_dp_is_edp(intel_dp)) {
6258                         /*
6259                          * Reinit the power sequencer, in case BIOS did
6260                          * something nasty with it.
6261                          */
6262                         intel_dp_pps_init(intel_dp);
6263                         intel_edp_panel_vdd_sanitize(intel_dp);
6264                 }
6265         }
6266 }
6267
6268 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6269         .force = intel_dp_force,
6270         .fill_modes = drm_helper_probe_single_connector_modes,
6271         .atomic_get_property = intel_digital_connector_atomic_get_property,
6272         .atomic_set_property = intel_digital_connector_atomic_set_property,
6273         .late_register = intel_dp_connector_register,
6274         .early_unregister = intel_dp_connector_unregister,
6275         .destroy = intel_connector_destroy,
6276         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6277         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6278 };
6279
6280 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6281         .detect_ctx = intel_dp_detect,
6282         .get_modes = intel_dp_get_modes,
6283         .mode_valid = intel_dp_mode_valid,
6284         .atomic_check = intel_digital_connector_atomic_check,
6285 };
6286
6287 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6288         .reset = intel_dp_encoder_reset,
6289         .destroy = intel_dp_encoder_destroy,
6290 };
6291
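/*
 * Handle an HPD pulse on a DP port. A long pulse signals
 * (dis)connection; we just flag the link parameters for re-computation
 * and return IRQ_NONE so the caller runs a full detect cycle. Short
 * pulses are serviced here: MST sinks get their ESI vector processed,
 * SST sinks go through intel_dp_short_pulse() for link status and sink
 * IRQ handling.
 */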
6292 enum irqreturn
6293 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6294 {
6295         struct intel_dp *intel_dp = &intel_dig_port->dp;
6296
6297         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6298                 /*
6299                  * vdd off can generate a long pulse on eDP which
6300                  * would require vdd on to handle it, and thus we
6301                  * would end up in an endless cycle of
6302                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6303                  */
6304                 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
6305                               intel_dig_port->base.base.base.id,
6306                               intel_dig_port->base.base.name);
6307                 return IRQ_HANDLED;
6308         }
6309
6310         DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
6311                       intel_dig_port->base.base.base.id,
6312                       intel_dig_port->base.base.name,
6313                       long_hpd ? "long" : "short");
6314
6315         if (long_hpd) {
6316                 intel_dp->reset_link_params = true;
6317                 return IRQ_NONE;
6318         }
6319
6320         if (intel_dp->is_mst) {
6321                 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6322                         /*
6323                          * If we were in MST mode, and device is not
6324                          * there, get out of MST mode
6325                          */
6326                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6327                                       intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6328                         intel_dp->is_mst = false;
6329                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6330                                                         intel_dp->is_mst);
6331
6332                         return IRQ_NONE;
6333                 }
6334         }
6335
6336         if (!intel_dp->is_mst) {
6337                 bool handled;
6338
6339                 handled = intel_dp_short_pulse(intel_dp);
6340
6341                 if (!handled)
6342                         return IRQ_NONE;
6343         }
6344
6345         return IRQ_HANDLED;
6346 }
6347
6348 /* check the VBT to see whether the eDP is on another port */
6349 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6350 {
6351         /*
6352          * eDP is not supported on g4x, so bail out early just
6353          * for a bit of extra safety in case the VBT is bonkers.
6354          */
6355         if (INTEL_GEN(dev_priv) < 5)
6356                 return false;
6357
6358         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6359                 return true;
6360
6361         return intel_bios_is_port_edp(dev_priv, port);
6362 }
6363
6364 static void
6365 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6366 {
6367         struct drm_i915_private *dev_priv = to_i915(connector->dev);
6368         enum port port = dp_to_dig_port(intel_dp)->base.port;
6369
6370         if (!IS_G4X(dev_priv) && port != PORT_A)
6371                 intel_attach_force_audio_property(connector);
6372
6373         intel_attach_broadcast_rgb_property(connector);
6374         if (HAS_GMCH(dev_priv))
6375                 drm_connector_attach_max_bpc_property(connector, 6, 10);
6376         else if (INTEL_GEN(dev_priv) >= 5)
6377                 drm_connector_attach_max_bpc_property(connector, 6, 12);
6378
6379         if (intel_dp_is_edp(intel_dp)) {
6380                 u32 allowed_scalers;
6381
6382                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6383                 if (!HAS_GMCH(dev_priv))
6384                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6385
6386                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6387
6388                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6389
6390         }
6391 }
6392
6393 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6394 {
6395         intel_dp->panel_power_off_time = ktime_get_boottime();
6396         intel_dp->last_power_on = jiffies;
6397         intel_dp->last_backlight_off = jiffies;
6398 }
6399
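/*
 * Read the current panel power sequencer delays out of the PPS registers
 * into @seq. The hardware fields are in 100us units, except the
 * 100ms-granularity power cycle delay, which is scaled by 1000 here so
 * that all fields end up in the same 100us units.
 */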
6400 static void
6401 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6402 {
6403         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6404         u32 pp_on, pp_off, pp_ctl;
6405         struct pps_registers regs;
6406
6407         intel_pps_get_registers(intel_dp, &regs);
6408
6409         pp_ctl = ironlake_get_pp_control(intel_dp);
6410
6411         /* Ensure PPS is unlocked */
6412         if (!HAS_DDI(dev_priv))
6413                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6414
6415         pp_on = I915_READ(regs.pp_on);
6416         pp_off = I915_READ(regs.pp_off);
6417
6418         /* Pull timing values out of registers */
6419         seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6420         seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6421         seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6422         seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6423
6424         if (i915_mmio_reg_valid(regs.pp_div)) {
6425                 u32 pp_div;
6426
6427                 pp_div = I915_READ(regs.pp_div);
6428
6429                 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6430         } else {
6431                 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6432         }
6433 }
6434
6435 static void
6436 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6437 {
6438         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6439                       state_name,
6440                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6441 }
6442
6443 static void
6444 intel_pps_verify_state(struct intel_dp *intel_dp)
6445 {
6446         struct edp_power_seq hw;
6447         struct edp_power_seq *sw = &intel_dp->pps_delays;
6448
6449         intel_pps_readout_hw_state(intel_dp, &hw);
6450
6451         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6452             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6453                 DRM_ERROR("PPS state mismatch\n");
6454                 intel_pps_dump_state("sw", sw);
6455                 intel_pps_dump_state("hw", &hw);
6456         }
6457 }
6458
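/*
 * Compute the final panel power sequencer delays as, per field, the
 * maximum of the current register value and the VBT, falling back to the
 * eDP spec limit when both are zero. For instance, with a hypothetical
 * cur.t1_t3 == 0 and vbt.t1_t3 == 0, the spec value of 210 * 10 (100us
 * units) would be chosen.
 */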
6459 static void
6460 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6461 {
6462         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6463         struct edp_power_seq cur, vbt, spec,
6464                 *final = &intel_dp->pps_delays;
6465
6466         lockdep_assert_held(&dev_priv->pps_mutex);
6467
6468         /* already initialized? */
6469         if (final->t11_t12 != 0)
6470                 return;
6471
6472         intel_pps_readout_hw_state(intel_dp, &cur);
6473
6474         intel_pps_dump_state("cur", &cur);
6475
6476         vbt = dev_priv->vbt.edp.pps;
6477         /* On the Toshiba Satellite P50-C-18C the VBT T12 delay
6478          * of 500ms appears to be too short. Occasionally the panel
6479          * just fails to power back on. Increasing the delay to 800ms
6480          * seems sufficient to avoid this problem.
6481          */
6482         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6483                 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6484                 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6485                               vbt.t11_t12);
6486         }
6487         /* T11_T12 delay is special and actually in units of 100ms, but zero
6488          * based in the hw (so we need to add 100 ms). But the sw vbt
6489          * table multiplies it by 1000 to make it in units of 100usec,
6490          * too. */
6491         vbt.t11_t12 += 100 * 10;
6492
6493         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6494          * our hw here, which are all in 100usec. */
6495         spec.t1_t3 = 210 * 10;
6496         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6497         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6498         spec.t10 = 500 * 10;
6499         /* This one is special and actually in units of 100ms, but zero
6500          * based in the hw (so we need to add 100 ms). But the sw vbt
6501          * table multiplies it by 1000 to make it in units of 100usec,
6502          * too. */
6503         spec.t11_t12 = (510 + 100) * 10;
6504
6505         intel_pps_dump_state("vbt", &vbt);
6506
6507         /* Use the max of the register settings and vbt. If both are
6508          * unset, fall back to the spec limits. */
6509 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
6510                                        spec.field : \
6511                                        max(cur.field, vbt.field))
6512         assign_final(t1_t3);
6513         assign_final(t8);
6514         assign_final(t9);
6515         assign_final(t10);
6516         assign_final(t11_t12);
6517 #undef assign_final
6518
6519 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
6520         intel_dp->panel_power_up_delay = get_delay(t1_t3);
6521         intel_dp->backlight_on_delay = get_delay(t8);
6522         intel_dp->backlight_off_delay = get_delay(t9);
6523         intel_dp->panel_power_down_delay = get_delay(t10);
6524         intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6525 #undef get_delay
6526
6527         DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6528                       intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6529                       intel_dp->panel_power_cycle_delay);
6530
6531         DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6532                       intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6533
6534         /*
6535          * We override the HW backlight delays to 1 because we do manual waits
6536          * on them. For T8, even BSpec recommends doing it. For T9, if we
6537          * don't do this, we'll end up waiting for the backlight off delay
6538          * twice: once when we do the manual sleep, and once when we disable
6539          * the panel and wait for the PP_STATUS bit to become zero.
6540          */
6541         final->t8 = 1;
6542         final->t9 = 1;
6543
6544         /*
6545          * HW has only a 100msec granularity for t11_t12 so round it up
6546          * accordingly.
6547          */
6548         final->t11_t12 = roundup(final->t11_t12, 100 * 10);
6549 }
6550
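/*
 * Program the PPS registers from the delays computed above. With
 * @force_disable_vdd, EDP_FORCE_VDD is cleared first so that power
 * domain tracking stays sane if the BIOS left VDD enabled on an unused
 * power sequencer.
 */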
6551 static void
6552 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6553                                               bool force_disable_vdd)
6554 {
6555         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6556         u32 pp_on, pp_off, port_sel = 0;
6557         int div = dev_priv->rawclk_freq / 1000;
6558         struct pps_registers regs;
6559         enum port port = dp_to_dig_port(intel_dp)->base.port;
6560         const struct edp_power_seq *seq = &intel_dp->pps_delays;
6561
6562         lockdep_assert_held(&dev_priv->pps_mutex);
6563
6564         intel_pps_get_registers(intel_dp, &regs);
6565
6566         /*
6567          * On some VLV machines the BIOS can leave the VDD
6568          * enabled even on power sequencers which aren't
6569          * hooked up to any port. This would mess up the
6570          * power domain tracking the first time we pick
6571          * one of these power sequencers for use since
6572          * edp_panel_vdd_on() would notice that the VDD was
6573          * already on and therefore wouldn't grab the power
6574          * domain reference. Disable VDD first to avoid this.
6575          * This also avoids spuriously turning the VDD on as
6576          * soon as the new power sequencer gets initialized.
6577          */
6578         if (force_disable_vdd) {
6579                 u32 pp = ironlake_get_pp_control(intel_dp);
6580
6581                 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6582
6583                 if (pp & EDP_FORCE_VDD)
6584                         DRM_DEBUG_KMS("VDD already on, disabling first\n");
6585
6586                 pp &= ~EDP_FORCE_VDD;
6587
6588                 I915_WRITE(regs.pp_ctrl, pp);
6589         }
6590
6591         pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6592                 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6593         pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6594                 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6595
6596         /* Haswell doesn't have any port selection bits for the panel
6597          * power sequencer any more. */
6598         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6599                 port_sel = PANEL_PORT_SELECT_VLV(port);
6600         } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6601                 switch (port) {
6602                 case PORT_A:
6603                         port_sel = PANEL_PORT_SELECT_DPA;
6604                         break;
6605                 case PORT_C:
6606                         port_sel = PANEL_PORT_SELECT_DPC;
6607                         break;
6608                 case PORT_D:
6609                         port_sel = PANEL_PORT_SELECT_DPD;
6610                         break;
6611                 default:
6612                         MISSING_CASE(port);
6613                         break;
6614                 }
6615         }
6616
6617         pp_on |= port_sel;
6618
6619         I915_WRITE(regs.pp_on, pp_on);
6620         I915_WRITE(regs.pp_off, pp_off);
6621
6622         /*
6623          * Compute the divisor for the pp clock, simply matching the Bspec formula.
6624          */
6625         if (i915_mmio_reg_valid(regs.pp_div)) {
6626                 I915_WRITE(regs.pp_div,
6627                            REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6628                            REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
6629         } else {
6630                 u32 pp_ctl;
6631
6632                 pp_ctl = I915_READ(regs.pp_ctrl);
6633                 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6634                 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6635                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6636         }
6637
6638         DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6639                       I915_READ(regs.pp_on),
6640                       I915_READ(regs.pp_off),
6641                       i915_mmio_reg_valid(regs.pp_div) ?
6642                       I915_READ(regs.pp_div) :
6643                       (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6644 }
6645
6646 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6647 {
6648         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6649
6650         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6651                 vlv_initial_power_sequencer_setup(intel_dp);
6652         } else {
6653                 intel_dp_init_panel_power_sequencer(intel_dp);
6654                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6655         }
6656 }
6657
6658 /**
6659  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6660  * @dev_priv: i915 device
6661  * @crtc_state: a pointer to the active intel_crtc_state
6662  * @refresh_rate: RR to be programmed
6663  *
6664  * This function gets called when refresh rate (RR) has to be changed from
6665  * one frequency to another. Switches can be between high and low RR
6666  * supported by the panel or to any other RR based on media playback (in
6667  * this case, RR value needs to be passed from user space).
6668  *
6669  * The caller of this function needs to take a lock on dev_priv->drrs.
6670  */
6671 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6672                                     const struct intel_crtc_state *crtc_state,
6673                                     int refresh_rate)
6674 {
6675         struct intel_dp *intel_dp = dev_priv->drrs.dp;
6676         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6677         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6678
6679         if (refresh_rate <= 0) {
6680                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6681                 DRM_DEBUG_KMS("Refresh rate should be a positive non-zero value.\n");
6682         }
6683
6684         if (intel_dp == NULL) {
6685                 DRM_DEBUG_KMS("DRRS not supported.\n");
6686                 return;
6687         }
6688
6689         if (!intel_crtc) {
6690                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6691                 return;
6692         }
6693
6694         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6695                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6696                 return;
6697         }
6698
6699         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6700                         refresh_rate)
6701                 index = DRRS_LOW_RR;
6702
6703         if (index == dev_priv->drrs.refresh_rate_type) {
6704                 DRM_DEBUG_KMS(
6705                         "DRRS requested for previously set RR...ignoring\n");
6706                 return;
6707         }
6708
6709         if (!crtc_state->base.active) {
6710                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6711                 return;
6712         }
6713
6714         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6715                 switch (index) {
6716                 case DRRS_HIGH_RR:
6717                         intel_dp_set_m_n(crtc_state, M1_N1);
6718                         break;
6719                 case DRRS_LOW_RR:
6720                         intel_dp_set_m_n(crtc_state, M2_N2);
6721                         break;
6722                 case DRRS_MAX_RR:
6723                 default:
6724                         DRM_ERROR("Unsupported refresh rate type\n");
6725                 }
6726         } else if (INTEL_GEN(dev_priv) > 6) {
6727                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6728                 u32 val;
6729
6730                 val = I915_READ(reg);
6731                 if (index > DRRS_HIGH_RR) {
6732                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6733                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6734                         else
6735                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6736                 } else {
6737                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6738                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6739                         else
6740                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6741                 }
6742                 I915_WRITE(reg, val);
6743         }
6744
6745         dev_priv->drrs.refresh_rate_type = index;
6746
6747         DRM_DEBUG_KMS("eDP refresh rate set to: %dHz\n", refresh_rate);
6748 }
6749
6750 /**
6751  * intel_edp_drrs_enable - init drrs struct if supported
6752  * @intel_dp: DP struct
6753  * @crtc_state: A pointer to the active crtc state.
6754  *
6755  * Initializes frontbuffer_bits and drrs.dp
6756  */
6757 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6758                            const struct intel_crtc_state *crtc_state)
6759 {
6760         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6761
6762         if (!crtc_state->has_drrs) {
6763                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6764                 return;
6765         }
6766
6767         if (dev_priv->psr.enabled) {
6768                 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6769                 return;
6770         }
6771
6772         mutex_lock(&dev_priv->drrs.mutex);
6773         if (dev_priv->drrs.dp) {
6774                 DRM_DEBUG_KMS("DRRS already enabled\n");
6775                 goto unlock;
6776         }
6777
6778         dev_priv->drrs.busy_frontbuffer_bits = 0;
6779
6780         dev_priv->drrs.dp = intel_dp;
6781
6782 unlock:
6783         mutex_unlock(&dev_priv->drrs.mutex);
6784 }
6785
6786 /**
6787  * intel_edp_drrs_disable - Disable DRRS
6788  * @intel_dp: DP struct
6789  * @old_crtc_state: Pointer to old crtc_state.
6790  *
6791  */
6792 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6793                             const struct intel_crtc_state *old_crtc_state)
6794 {
6795         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6796
6797         if (!old_crtc_state->has_drrs)
6798                 return;
6799
6800         mutex_lock(&dev_priv->drrs.mutex);
6801         if (!dev_priv->drrs.dp) {
6802                 mutex_unlock(&dev_priv->drrs.mutex);
6803                 return;
6804         }
6805
6806         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6807                 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6808                         intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6809
6810         dev_priv->drrs.dp = NULL;
6811         mutex_unlock(&dev_priv->drrs.mutex);
6812
6813         cancel_delayed_work_sync(&dev_priv->drrs.work);
6814 }
6815
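/*
 * Idleness work: if no frontbuffer has been dirtied within the 1 second
 * timeout, switch to the panel's downclocked refresh rate. The busy bits
 * are re-checked under the mutex because an invalidate can race with
 * this delayed work.
 */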
6816 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6817 {
6818         struct drm_i915_private *dev_priv =
6819                 container_of(work, typeof(*dev_priv), drrs.work.work);
6820         struct intel_dp *intel_dp;
6821
6822         mutex_lock(&dev_priv->drrs.mutex);
6823
6824         intel_dp = dev_priv->drrs.dp;
6825
6826         if (!intel_dp)
6827                 goto unlock;
6828
6829         /*
6830          * The delayed work can race with an invalidate hence we need to
6831          * recheck.
6832          */
6833
6834         if (dev_priv->drrs.busy_frontbuffer_bits)
6835                 goto unlock;
6836
6837         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6838                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6839
6840                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6841                         intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6842         }
6843
6844 unlock:
6845         mutex_unlock(&dev_priv->drrs.mutex);
6846 }
6847
6848 /**
6849  * intel_edp_drrs_invalidate - Disable Idleness DRRS
6850  * @dev_priv: i915 device
6851  * @frontbuffer_bits: frontbuffer plane tracking bits
6852  *
6853  * This function gets called every time rendering on the given planes starts.
6854  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6855  *
6856  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6857  */
6858 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6859                                unsigned int frontbuffer_bits)
6860 {
6861         struct drm_crtc *crtc;
6862         enum pipe pipe;
6863
6864         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6865                 return;
6866
6867         cancel_delayed_work(&dev_priv->drrs.work);
6868
6869         mutex_lock(&dev_priv->drrs.mutex);
6870         if (!dev_priv->drrs.dp) {
6871                 mutex_unlock(&dev_priv->drrs.mutex);
6872                 return;
6873         }
6874
6875         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6876         pipe = to_intel_crtc(crtc)->pipe;
6877
6878         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6879         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6880
6881         /* invalidate means busy screen hence upclock */
6882         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6883                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6884                         dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6885
6886         mutex_unlock(&dev_priv->drrs.mutex);
6887 }
6888
6889 /**
6890  * intel_edp_drrs_flush - Restart Idleness DRRS
6891  * @dev_priv: i915 device
6892  * @frontbuffer_bits: frontbuffer plane tracking bits
6893  *
6894  * This function gets called every time rendering on the given planes has
6895  * completed or a flip on a crtc is completed. So DRRS should be upclocked
6896  * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
6897  * if no other planes are dirty.
6898  *
6899  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6900  */
6901 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6902                           unsigned int frontbuffer_bits)
6903 {
6904         struct drm_crtc *crtc;
6905         enum pipe pipe;
6906
6907         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6908                 return;
6909
6910         cancel_delayed_work(&dev_priv->drrs.work);
6911
6912         mutex_lock(&dev_priv->drrs.mutex);
6913         if (!dev_priv->drrs.dp) {
6914                 mutex_unlock(&dev_priv->drrs.mutex);
6915                 return;
6916         }
6917
6918         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6919         pipe = to_intel_crtc(crtc)->pipe;
6920
6921         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6922         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6923
6924         /* flush means busy screen hence upclock */
6925         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6926                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6927                                 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6928
6929         /*
6930          * flush also means no more activity hence schedule downclock, if all
6931          * other fbs are quiescent too
6932          */
6933         if (!dev_priv->drrs.busy_frontbuffer_bits)
6934                 schedule_delayed_work(&dev_priv->drrs.work,
6935                                 msecs_to_jiffies(1000));
6936         mutex_unlock(&dev_priv->drrs.mutex);
6937 }
6938
6939 /**
6940  * DOC: Display Refresh Rate Switching (DRRS)
6941  *
6942  * Display Refresh Rate Switching (DRRS) is a power conservation feature
6943  * which enables switching between low and high refresh rates,
6944  * dynamically, based on the usage scenario. This feature is applicable
6945  * for internal panels.
6946  *
6947  * Indication that the panel supports DRRS is given by the panel EDID, which
6948  * would list multiple refresh rates for one resolution.
6949  *
6950  * DRRS is of 2 types - static and seamless.
6951  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6952  * (may appear as a blink on screen) and is used in dock-undock scenario.
6953  * Seamless DRRS involves changing RR without any visual effect to the user
6954  * and can be used during normal system usage. This is done by programming
6955  * certain registers.
6956  *
6957  * Support for static/seamless DRRS may be indicated in the VBT based on
6958  * inputs from the panel spec.
6959  *
6960  * DRRS saves power by switching to low RR based on usage scenarios.
6961  *
6962  * The implementation is based on frontbuffer tracking implementation.  When
6963  * there is a disturbance on the screen triggered by user activity or a periodic
6964  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6965  * no movement on screen, after a timeout of 1 second, a switch to low RR is
6966  * made.
6967  *
6968  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6969  * and intel_edp_drrs_flush() are called.
6970  *
6971  * DRRS can be further extended to support other internal panels and also
6972  * the scenario of video playback wherein RR is set based on the rate
6973  * requested by userspace.
6974  */
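
/*
 * A minimal usage sketch of the integration described above, assuming a
 * hypothetical caller in the frontbuffer tracking code; apart from
 * intel_edp_drrs_invalidate(), intel_edp_drrs_flush() and the
 * INTEL_FRONTBUFFER() bit macro, the names below are illustrative:
 *
 *	unsigned int bits = INTEL_FRONTBUFFER(pipe, plane_id);
 *
 *	intel_edp_drrs_invalidate(dev_priv, bits);
 *	... render or flip touching the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, bits);
 *
 * invalidate() forces the high RR while the screen is busy; flush() clears
 * the busy bits again and restarts the 1 second idleness timer that
 * eventually downclocks back to the low RR.
 */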

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 *
 * Returns:
 * Downclock mode if the panel supports it, else NULL.
 * DRRS support is determined by the presence of the downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
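
/*
 * Sketch of how the probe path consumes the mode returned above; this
 * mirrors the real flow in intel_edp_init_connector() below, with the
 * local variable names taken from there:
 *
 *	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
 *	if (fixed_mode)
 *		downclock_mode = intel_dp_drrs_init(intel_connector,
 *						    fixed_mode);
 *
 *	intel_panel_init(&intel_connector->panel, fixed_mode,
 *			 downclock_mode);
 *
 * A NULL downclock mode simply leaves DRRS disabled for the panel.
 */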

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector,
							   edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fall back to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		 intel_dig_port->max_lanes, intel_encoder->base.base.id,
		 intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		WARN_ON(intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = false;

	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);

		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/*
	 * For G4X desktop chips, PEG_BAND_GAP_DATA 3:0 must first be written
	 * with 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);

		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
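
/*
 * Example call site sketch: on pre-DDI platforms the output setup code
 * probes the port's detect bit and then registers it, along the lines of
 * the logic in intel_setup_outputs() (the exact surrounding code is not
 * shown here):
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev_priv, DP_B, PORT_B);
 *
 * On failure everything allocated here is freed again, so the caller only
 * has to check the return value.
 */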

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}