/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
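/* (2 FIFOs * 640 * 6 bytes = 7680 bytes = 61440 bits, i.e. the value below
 * is the joiner RAM buffer size expressed in bits)
 */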
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER      61440
#define DP_DSC_MIN_SUPPORTED_BPC                8
#define DP_DSC_MAX_SUPPORTED_BPC                10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE                  2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1             400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
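/* (i.e. 0.976; the define below stores the factor scaled by 1000) */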
#define DP_DSC_FEC_OVERHEAD_FACTOR              976

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
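        /*
         * For example, for the 162000 entry below: m2_int = 32 and
         * m2_fraction = 1677722, so m2 = (32 << 22) | 1677722 = 0x819999a.
         */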
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
                                      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
        static const int dp_rates[] = {
                162000, 270000, 540000, 810000
        };
        int i, max_rate;

        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
                if (dp_rates[i] > max_rate)
                        break;
                intel_dp->sink_rates[i] = dp_rates[i];
        }

        intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
        int i;

        /* Limit results by potentially reduced max rate */
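        /* rates[] is sorted ascending, so scan from the highest rate down
         * and return the length up to the first rate that still fits.
         */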
        for (i = 0; i < len; i++) {
                if (rates[len - i - 1] <= max_rate)
                        return len - i;
        }

        return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
                                          int max_rate)
{
        return intel_dp_rate_limit_len(intel_dp->common_rates,
                                       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
        intel_wakeref_t wakeref;
        u32 lane_info;

        if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
                return 4;

        lane_info = 0;
        with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
                             DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
                                DP_LANE_ASSIGNMENT_SHIFT(tc_port);

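        /*
         * lane_info is a 4-bit lane assignment mask: a single set bit means
         * one lane, two set bits (0x3 or 0xc) mean two lanes, and all four
         * bits (0xf) mean four lanes.
         */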
        switch (lane_info) {
        default:
                MISSING_CASE(lane_info);
        case 1:
        case 2:
        case 4:
        case 8:
                return 1;
        case 3:
        case 12:
                return 2;
        case 15:
                return 4;
        }
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
        int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

        return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz, divide bpp by 8 for bit to byte conversion */
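        /* e.g. a 148500 kHz pixel clock at 24 bpp needs
         * DIV_ROUND_UP(148500 * 24, 8) = 445500 kB/s of link bandwidth
         */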
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
         * link rate that is generally expressed in Gbps. Since 8 bits of data
         * are transmitted every LS_Clk per lane, there is no need to account
         * for the channel encoding that is done in the PHY layer here.
         */
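        /* e.g. at HBR2, LS_Clk = 540000 kHz, so 540000 * 4 lanes = 2160000 kB/s */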

        return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;

        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

        if (type != DP_DS_PORT_TYPE_VGA)
                return max_dotclk;

        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
                                                    intel_dp->downstream_ports);

        if (ds_max_dotclk != 0)
                max_dotclk = min(max_dotclk, ds_max_dotclk);

        return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;

        u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

        /* Low voltage SKUs are limited to max of 5.4G */
        if (voltage == VOLTAGE_INFO_0_85V)
                return 540000;

        /* For this SKU 8.1G is supported in all ports */
        if (IS_CNL_WITH_PORT_F(dev_priv))
                return 810000;

        /* For other SKUs, max rate on ports A and D is 5.4G */
        if (port == PORT_A || port == PORT_D)
                return 540000;

        return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;

        if (intel_port_is_combophy(dev_priv, port) &&
            !IS_ELKHARTLAKE(dev_priv) &&
            !intel_dp_is_edp(intel_dp))
                return 540000;

        return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
        /* The values must be in increasing order */
        static const int cnl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
                162000, 216000, 243000, 270000, 324000, 432000, 540000
        };
        static const int skl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000
        };
        static const int hsw_rates[] = {
                162000, 270000, 540000
        };
        static const int g4x_rates[] = {
                162000, 270000
        };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const struct ddi_vbt_port_info *info =
                &dev_priv->vbt.ddi_port_info[dig_port->base.port];
        const int *source_rates;
        int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
        } else {
                source_rates = g4x_rates;
                size = ARRAY_SIZE(g4x_rates);
        }

        if (max_rate && vbt_max_rate)
                max_rate = min(max_rate, vbt_max_rate);
        else if (vbt_max_rate)
                max_rate = vbt_max_rate;

        if (max_rate)
                size = intel_dp_rate_limit_len(source_rates, size, max_rate);

        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

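        /* Both rate arrays are sorted ascending; walk them in lockstep and
         * collect the rates they have in common.
         */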
        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
        int i;

        for (i = 0; i < len; i++)
                if (rate == rates[i])
                        return i;

        return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
        WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->sink_rates,
                                                     intel_dp->num_sink_rates,
                                                     intel_dp->common_rates);

        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
                                       u8 lane_count)
{
        /*
         * FIXME: we need to synchronize the current link parameters with
         * hardware readout. Currently fast link training doesn't work on
         * boot-up.
         */
        if (link_rate == 0 ||
            link_rate > intel_dp->max_link_rate)
                return false;

        if (lane_count == 0 ||
            lane_count > intel_dp_max_lane_count(intel_dp))
                return false;

        return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
                                                     u8 lane_count)
{
        const struct drm_display_mode *fixed_mode =
                intel_dp->attached_connector->panel.fixed_mode;
        int mode_rate, max_rate;

        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
        max_rate = intel_dp_max_data_rate(link_rate, lane_count);
        if (mode_rate > max_rate)
                return false;

        return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
        int index;

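        /*
         * Fallback order: first step down to the next lower common link rate
         * at the same lane count; once at the lowest rate, halve the lane
         * count and go back to the maximum common rate.
         */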
        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
        if (index > 0) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
                intel_dp->max_link_lane_count = lane_count;
        } else if (lane_count > 1) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
                DRM_ERROR("Link Training Unsuccessful\n");
                return -1;
        }

        return 0;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;

        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        /*
         * Output bpp is stored in 6.4 format so right shift by 4 to get the
         * integer value since we support only integer values of bpp.
         */
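        /* e.g. a hypothetical raw sink value of 0xA0 (160) in 6.4 fixed
         * point decodes to 160 >> 4 = 10 bpp.
         */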
        if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
            drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
                if (intel_dp_is_edp(intel_dp)) {
                        dsc_max_output_bpp =
                                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
                        dsc_slice_count =
                                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        dsc_max_output_bpp =
                                intel_dp_dsc_get_output_bpp(max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
                        dsc_slice_count =
                                intel_dp_dsc_get_slice_count(intel_dp,
                                                             target_clock,
                                                             mode->hdisplay);
                }
        }

        if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
            target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

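/* Pack up to 4 bytes, MSB first, into a 32-bit AUX channel data register value */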
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
        int i;
        u32 v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((u32)src[i]) << ((3 - i) * 8);
        return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
        int i;
        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        /*
         * See intel_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        wakeref = intel_display_power_get(dev_priv,
                                          intel_aux_power_domain(dp_to_dig_port(intel_dp)));

        mutex_lock(&dev_priv->pps_mutex);

        return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        mutex_unlock(&dev_priv->pps_mutex);
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dp_to_dig_port(intel_dp)),
                                wakeref);
        return 0;
}

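/*
 * Run the body exactly once with the pps mutex held: the loop is entered via
 * the wakeref returned by pps_lock() and exited when pps_unlock() returns 0.
 */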
#define with_pps_lock(dp, wf) \
        for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        u32 DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->base.port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->base.port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev_priv))
                DP |= DP_PIPE_SEL_CHV(pipe);
        else
                DP |= DP_PIPE_SEL(pipe);

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev_priv) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        DRM_ERROR("Failed to force on pll for pipe %c!\n",
                                  pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev_priv, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

        /*
         * We don't have a power sequencer assigned currently.
         * Pick one that's not used by other ports.
         */
        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                if (encoder->type == INTEL_OUTPUT_EDP) {
                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                                intel_dp->active_pipe != intel_dp->pps_pipe);

                        if (intel_dp->pps_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->pps_pipe);
                } else {
                        WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

                        if (intel_dp->active_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->active_pipe);
                }
        }

        if (pipes == 0)
                return INVALID_PIPE;

        return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                intel_dp->active_pipe != intel_dp->pps_pipe);

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        pipe = vlv_find_free_pps(dev_priv);

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipe == INVALID_PIPE))
                pipe = PIPE_A;

        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->base.port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int backlight_controller = dev_priv->vbt.backlight.controller;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        if (!intel_dp->pps_reset)
                return backlight_controller;

        intel_dp->pps_reset = false;

        /*
         * Only the HW needs to be reprogrammed, the SW state is fixed and
         * has been setup during connector init.
         */
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

        return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    !IS_GEN9_LP(dev_priv)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so they
         * should always be used.
         */

        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                if (IS_GEN9_LP(dev_priv))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}

struct pps_registers {
        i915_reg_t pp_ctrl;
        i915_reg_t pp_stat;
        i915_reg_t pp_on;
        i915_reg_t pp_off;
        i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_GEN9_LP(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);

        /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
        if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                regs->pp_div = INVALID_MMIO_REG;
        else
                regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_stat;
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        with_pps_lock(intel_dp, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                        i915_reg_t pp_ctrl_reg, pp_div_reg;
                        u32 pp_div;

                        pp_ctrl_reg = PP_CONTROL(pipe);
                        pp_div_reg  = PP_DIVISOR(pipe);
                        pp_div = I915_READ(pp_div_reg);
                        pp_div &= PP_REFERENCE_DIVIDER_MASK;

                        /* 0x1F write to PP_DIV_REG sets max cycle delay */
                        I915_WRITE(pp_div_reg, pp_div | 0x1F);
                        I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
                        msleep(intel_dp->panel_power_cycle_delay);
                }
        }

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!intel_dp_is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        u32 status;
        bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        done = wait_event_timeout(i915->gmbus_wait_queue, C,
                                  msecs_to_jiffies_timeout(10));

        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

        return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based off the hrawclk, and would like to run at
         * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
         */
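        /* e.g. a hypothetical 200 MHz hrawclk (rawclk_freq == 200000, in kHz)
         * yields a divider of 200000 / 2000 = 100, giving 200 MHz / 100 = 2 MHz.
         */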
        return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based off the cdclk or PCH rawclk, and would
         * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
         * divide by 2000 and use that
         */
        if (dig_port->aux_ch == AUX_CH_A)
                return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
        else
                return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        }

        return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        u32 precharge, timeout;

        if (IS_GEN(dev_priv, 6))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev_priv))
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 unused)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 ret;

        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
              DP_AUX_CH_CTL_INTERRUPT |
              DP_AUX_CH_CTL_TIME_OUT_ERROR |
              DP_AUX_CH_CTL_TIME_OUT_MAX |
              DP_AUX_CH_CTL_RECEIVE_ERROR |
              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

        if (intel_dig_port->tc_type == TC_PORT_TBT)
                ret |= DP_AUX_CH_CTL_TBT_IO;

        return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
                  const u8 *send, int send_bytes,
                  u8 *recv, int recv_size,
                  u32 aux_send_ctl_flags)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
                        to_i915(intel_dig_port->base.base.dev);
        struct intel_uncore *uncore = &i915->uncore;
        i915_reg_t ch_ctl, ch_data[5];
        u32 aux_clock_divider;
        enum intel_display_power_domain aux_domain =
                intel_aux_power_domain(intel_dig_port);
        intel_wakeref_t aux_wakeref;
        intel_wakeref_t pps_wakeref;
        int i, ret, recv_bytes;
        int try, clock = 0;
        u32 status;
        bool vdd;

        ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        for (i = 0; i < ARRAY_SIZE(ch_data); i++)
                ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

        aux_wakeref = intel_display_power_get(i915, aux_domain);
        pps_wakeref = pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for eg. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&i915->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = intel_uncore_read_notrace(uncore, ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }
        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = intel_uncore_read(uncore, ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          send_bytes,
                                                          aux_clock_divider);

                send_ctl |= aux_send_ctl_flags;

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                intel_uncore_write(uncore,
                                                   ch_data[i >> 2],
                                                   intel_dp_pack_aux(send + i,
                                                                     send_bytes - i));

                        /* Send the command and wait for it to complete */
                        intel_uncore_write(uncore, ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp);

                        /* Clear done status and any errors */
                        intel_uncore_write(uncore,
                                           ch_ctl,
                                           status |
                                           DP_AUX_CH_CTL_DONE |
                                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         *   400us delay required for errors and timeouts
                         *   Timeout errors from the HW already meet this
                         *   requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

        /*
         * By BSpec: "Message sizes of 0 or >20 are not allowed."
         * We have no idea what happened, so we return -EBUSY so that the
         * drm layer takes care of the necessary retries.
         */
        if (recv_bytes == 0 || recv_bytes > 20) {
                DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
                              recv_bytes);
                ret = -EBUSY;
                goto out;
        }

        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp, pps_wakeref);
        intel_display_power_put_async(i915, aux_domain, aux_wakeref);

        return ret;
}

#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)

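/*
 * Pack the AUX request header: 4-bit command, 20-bit address, and the
 * transfer length encoded as (size - 1), per the DP AUX protocol.
 */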
1400 static void
1401 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1402                     const struct drm_dp_aux_msg *msg)
1403 {
1404         txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1405         txbuf[1] = (msg->address >> 8) & 0xff;
1406         txbuf[2] = msg->address & 0xff;
1407         txbuf[3] = msg->size - 1;
1408 }
1409
1410 static ssize_t
1411 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1412 {
1413         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1414         u8 txbuf[20], rxbuf[20];
1415         size_t txsize, rxsize;
1416         int ret;
1417
1418         intel_dp_aux_header(txbuf, msg);
1419
1420         switch (msg->request & ~DP_AUX_I2C_MOT) {
1421         case DP_AUX_NATIVE_WRITE:
1422         case DP_AUX_I2C_WRITE:
1423         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1424                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1425                 rxsize = 2; /* 0 or 1 data bytes */
1426
1427                 if (WARN_ON(txsize > 20))
1428                         return -E2BIG;
1429
1430                 WARN_ON(!msg->buffer != !msg->size);
1431
1432                 if (msg->buffer)
1433                         memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1434
1435                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1436                                         rxbuf, rxsize, 0);
1437                 if (ret > 0) {
1438                         msg->reply = rxbuf[0] >> 4;
1439
1440                         if (ret > 1) {
1441                                 /* Number of bytes written in a short write. */
1442                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1443                         } else {
1444                                 /* Return payload size. */
1445                                 ret = msg->size;
1446                         }
1447                 }
1448                 break;
1449
1450         case DP_AUX_NATIVE_READ:
1451         case DP_AUX_I2C_READ:
1452                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1453                 rxsize = msg->size + 1;
1454
1455                 if (WARN_ON(rxsize > 20))
1456                         return -E2BIG;
1457
1458                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1459                                         rxbuf, rxsize, 0);
1460                 if (ret > 0) {
1461                         msg->reply = rxbuf[0] >> 4;
1462                         /*
1463                          * Assume happy day, and copy the data. The caller is
1464                          * expected to check msg->reply before touching it.
1465                          *
1466                          * Return payload size.
1467                          */
1468                         ret--;
1469                         memcpy(msg->buffer, rxbuf + 1, ret);
1470                 }
1471                 break;
1472
1473         default:
1474                 ret = -EINVAL;
1475                 break;
1476         }
1477
1478         return ret;
1479 }
1480
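/*
 * Per-platform AUX register lookup helpers follow. The pattern repeats
 * per generation: g4x exposes AUX channels B-D only, ILK-era parts
 * split channel A (CPU) from B-D (PCH), and SKL+ maps channels A-F
 * uniformly. The default: cases are guarded by MISSING_CASE() and fall
 * back to a valid channel rather than faulting on a bogus register.
 */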
1482 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1483 {
1484         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1485         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1486         enum aux_ch aux_ch = dig_port->aux_ch;
1487
1488         switch (aux_ch) {
1489         case AUX_CH_B:
1490         case AUX_CH_C:
1491         case AUX_CH_D:
1492                 return DP_AUX_CH_CTL(aux_ch);
1493         default:
1494                 MISSING_CASE(aux_ch);
1495                 return DP_AUX_CH_CTL(AUX_CH_B);
1496         }
1497 }
1498
1499 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1500 {
1501         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1502         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1503         enum aux_ch aux_ch = dig_port->aux_ch;
1504
1505         switch (aux_ch) {
1506         case AUX_CH_B:
1507         case AUX_CH_C:
1508         case AUX_CH_D:
1509                 return DP_AUX_CH_DATA(aux_ch, index);
1510         default:
1511                 MISSING_CASE(aux_ch);
1512                 return DP_AUX_CH_DATA(AUX_CH_B, index);
1513         }
1514 }
1515
1516 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1517 {
1518         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1519         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1520         enum aux_ch aux_ch = dig_port->aux_ch;
1521
1522         switch (aux_ch) {
1523         case AUX_CH_A:
1524                 return DP_AUX_CH_CTL(aux_ch);
1525         case AUX_CH_B:
1526         case AUX_CH_C:
1527         case AUX_CH_D:
1528                 return PCH_DP_AUX_CH_CTL(aux_ch);
1529         default:
1530                 MISSING_CASE(aux_ch);
1531                 return DP_AUX_CH_CTL(AUX_CH_A);
1532         }
1533 }
1534
1535 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1536 {
1537         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1538         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1539         enum aux_ch aux_ch = dig_port->aux_ch;
1540
1541         switch (aux_ch) {
1542         case AUX_CH_A:
1543                 return DP_AUX_CH_DATA(aux_ch, index);
1544         case AUX_CH_B:
1545         case AUX_CH_C:
1546         case AUX_CH_D:
1547                 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1548         default:
1549                 MISSING_CASE(aux_ch);
1550                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1551         }
1552 }
1553
1554 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1555 {
1556         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1557         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1558         enum aux_ch aux_ch = dig_port->aux_ch;
1559
1560         switch (aux_ch) {
1561         case AUX_CH_A:
1562         case AUX_CH_B:
1563         case AUX_CH_C:
1564         case AUX_CH_D:
1565         case AUX_CH_E:
1566         case AUX_CH_F:
1567                 return DP_AUX_CH_CTL(aux_ch);
1568         default:
1569                 MISSING_CASE(aux_ch);
1570                 return DP_AUX_CH_CTL(AUX_CH_A);
1571         }
1572 }
1573
1574 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1575 {
1576         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1577         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1578         enum aux_ch aux_ch = dig_port->aux_ch;
1579
1580         switch (aux_ch) {
1581         case AUX_CH_A:
1582         case AUX_CH_B:
1583         case AUX_CH_C:
1584         case AUX_CH_D:
1585         case AUX_CH_E:
1586         case AUX_CH_F:
1587                 return DP_AUX_CH_DATA(aux_ch, index);
1588         default:
1589                 MISSING_CASE(aux_ch);
1590                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1591         }
1592 }
1593
1594 static void
1595 intel_dp_aux_fini(struct intel_dp *intel_dp)
1596 {
1597         kfree(intel_dp->aux.name);
1598 }
1599
1600 static void
1601 intel_dp_aux_init(struct intel_dp *intel_dp)
1602 {
1603         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1604         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1605         struct intel_encoder *encoder = &dig_port->base;
1606
1607         if (INTEL_GEN(dev_priv) >= 9) {
1608                 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1609                 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1610         } else if (HAS_PCH_SPLIT(dev_priv)) {
1611                 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1612                 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1613         } else {
1614                 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1615                 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1616         }
1617
1618         if (INTEL_GEN(dev_priv) >= 9)
1619                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1620         else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1621                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1622         else if (HAS_PCH_SPLIT(dev_priv))
1623                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1624         else
1625                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1626
1627         if (INTEL_GEN(dev_priv) >= 9)
1628                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1629         else
1630                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1631
1632         drm_dp_aux_init(&intel_dp->aux);
1633
1634         /* Failure to allocate our preferred name is not critical */
1635         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1636                                        port_name(encoder->port));
1637         intel_dp->aux.transfer = intel_dp_aux_transfer;
1638 }
1639
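/*
 * source_rates[] is sorted in ascending order, so the last entry is
 * the fastest link clock the source supports: 540000 kHz corresponds
 * to HBR2 (5.4 GT/s per lane), 810000 kHz to HBR3 (8.1 GT/s).
 */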
1640 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1641 {
1642         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1643
1644         return max_rate >= 540000;
1645 }
1646
1647 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1648 {
1649         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1650
1651         return max_rate >= 810000;
1652 }
1653
1654 static void
1655 intel_dp_set_clock(struct intel_encoder *encoder,
1656                    struct intel_crtc_state *pipe_config)
1657 {
1658         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1659         const struct dp_link_dpll *divisor = NULL;
1660         int i, count = 0;
1661
1662         if (IS_G4X(dev_priv)) {
1663                 divisor = g4x_dpll;
1664                 count = ARRAY_SIZE(g4x_dpll);
1665         } else if (HAS_PCH_SPLIT(dev_priv)) {
1666                 divisor = pch_dpll;
1667                 count = ARRAY_SIZE(pch_dpll);
1668         } else if (IS_CHERRYVIEW(dev_priv)) {
1669                 divisor = chv_dpll;
1670                 count = ARRAY_SIZE(chv_dpll);
1671         } else if (IS_VALLEYVIEW(dev_priv)) {
1672                 divisor = vlv_dpll;
1673                 count = ARRAY_SIZE(vlv_dpll);
1674         }
1675
1676         if (divisor && count) {
1677                 for (i = 0; i < count; i++) {
1678                         if (pipe_config->port_clock == divisor[i].clock) {
1679                                 pipe_config->dpll = divisor[i].dpll;
1680                                 pipe_config->clock_set = true;
1681                                 break;
1682                         }
1683                 }
1684         }
1685 }
1686
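/*
 * Render an int array as a comma-separated list, e.g. {162000, 270000,
 * 540000} becomes "162000, 270000, 540000". Output is silently
 * truncated once the buffer fills up.
 */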
1687 static void snprintf_int_array(char *str, size_t len,
1688                                const int *array, int nelem)
1689 {
1690         int i;
1691
1692         str[0] = '\0';
1693
1694         for (i = 0; i < nelem; i++) {
1695                 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1696                 if (r >= len)
1697                         return;
1698                 str += r;
1699                 len -= r;
1700         }
1701 }
1702
1703 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1704 {
1705         char str[128]; /* FIXME: too big for stack? */
1706
1707         if ((drm_debug & DRM_UT_KMS) == 0)
1708                 return;
1709
1710         snprintf_int_array(str, sizeof(str),
1711                            intel_dp->source_rates, intel_dp->num_source_rates);
1712         DRM_DEBUG_KMS("source rates: %s\n", str);
1713
1714         snprintf_int_array(str, sizeof(str),
1715                            intel_dp->sink_rates, intel_dp->num_sink_rates);
1716         DRM_DEBUG_KMS("sink rates: %s\n", str);
1717
1718         snprintf_int_array(str, sizeof(str),
1719                            intel_dp->common_rates, intel_dp->num_common_rates);
1720         DRM_DEBUG_KMS("common rates: %s\n", str);
1721 }
1722
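/*
 * common_rates[] is sorted in ascending order, so the fastest rate
 * still permitted by max_link_rate sits at index len - 1; 162000 kHz
 * (RBR) serves as a safe fallback should the table be empty.
 */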
1723 int
1724 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1725 {
1726         int len;
1727
1728         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1729         if (WARN_ON(len <= 0))
1730                 return 162000;
1731
1732         return intel_dp->common_rates[len - 1];
1733 }
1734
1735 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1736 {
1737         int i = intel_dp_rate_index(intel_dp->sink_rates,
1738                                     intel_dp->num_sink_rates, rate);
1739
1740         if (WARN_ON(i < 0))
1741                 i = 0;
1742
1743         return i;
1744 }
1745
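/*
 * Example: with the link-bw method a 270000 kHz port clock becomes
 * link_bw 0x0a (DP_LINK_BW_2_7, i.e. port_clock / 27000), while the
 * eDP 1.4 rate-select method instead hands the sink an index into
 * its own sink_rates[] table and leaves link_bw at 0.
 */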
1746 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1747                            u8 *link_bw, u8 *rate_select)
1748 {
1749         /* eDP 1.4 rate select method. */
1750         if (intel_dp->use_rate_select) {
1751                 *link_bw = 0;
1752                 *rate_select =
1753                         intel_dp_rate_select(intel_dp, port_clock);
1754         } else {
1755                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1756                 *rate_select = 0;
1757         }
1758 }
1759
1760 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1761                                          const struct intel_crtc_state *pipe_config)
1762 {
1763         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1764
1765         return INTEL_GEN(dev_priv) >= 11 &&
1766                 pipe_config->cpu_transcoder != TRANSCODER_A;
1767 }
1768
1769 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1770                                   const struct intel_crtc_state *pipe_config)
1771 {
1772         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1773                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1774 }
1775
1776 static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1777                                          const struct intel_crtc_state *pipe_config)
1778 {
1779         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1780
1781         return INTEL_GEN(dev_priv) >= 10 &&
1782                 pipe_config->cpu_transcoder != TRANSCODER_A;
1783 }
1784
1785 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1786                                   const struct intel_crtc_state *pipe_config)
1787 {
1788         if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1789                 return false;
1790
1791         return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1792                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1793 }
1794
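/*
 * Example: a downstream facing port reporting a maximum of 8 bpc
 * clamps pipe_bpp to 3 * 8 = 24, since bpp here counts all three
 * colour components together.
 */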
1795 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1796                                 struct intel_crtc_state *pipe_config)
1797 {
1798         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1799         struct intel_connector *intel_connector = intel_dp->attached_connector;
1800         int bpp, bpc;
1801
1802         bpp = pipe_config->pipe_bpp;
1803         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1804
1805         if (bpc > 0)
1806                 bpp = min(bpp, 3*bpc);
1807
1808         if (intel_dp_is_edp(intel_dp)) {
1809                 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1810                 if (intel_connector->base.display_info.bpc == 0 &&
1811                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1812                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1813                                       dev_priv->vbt.edp.bpp);
1814                         bpp = dev_priv->vbt.edp.bpp;
1815                 }
1816         }
1817
1818         return bpp;
1819 }
1820
1821 /* Adjust link config limits based on compliance test requests. */
1822 void
1823 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1824                                   struct intel_crtc_state *pipe_config,
1825                                   struct link_config_limits *limits)
1826 {
1827         /* For DP Compliance we override the computed bpp for the pipe */
1828         if (intel_dp->compliance.test_data.bpc != 0) {
1829                 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1830
1831                 limits->min_bpp = limits->max_bpp = bpp;
1832                 pipe_config->dither_force_disable = bpp == 6 * 3;
1833
1834                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1835         }
1836
1837         /* Use values requested by Compliance Test Request */
1838         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1839                 int index;
1840
1841                 /* Validate the compliance test data since max values
1842                  * might have changed due to link train fallback.
1843                  */
1844                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1845                                                intel_dp->compliance.test_lane_count)) {
1846                         index = intel_dp_rate_index(intel_dp->common_rates,
1847                                                     intel_dp->num_common_rates,
1848                                                     intel_dp->compliance.test_link_rate);
1849                         if (index >= 0)
1850                                 limits->min_clock = limits->max_clock = index;
1851                         limits->min_lane_count = limits->max_lane_count =
1852                                 intel_dp->compliance.test_lane_count;
1853                 }
1854         }
1855 }
1856
1857 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1858 {
1859         /*
1860          * The bpp value was assumed to be for RGB format. For YCbCr 4:2:0
1861          * output the number of bits per pixel is half that of RGB, e.g.
1862          * 24 bpp RGB becomes 12 bpp.
1863          */
1864         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1865                 bpp /= 2;
1866
1867         return bpp;
1868 }
1869
1870 /* Optimize link config in order: max bpp, min clock, min lanes */
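/*
 * Example walk of the nested loops below: with common rates
 * {162000, 270000} and up to 4 lanes, a given bpp tries 162000x1,
 * 162000x2, 162000x4, then 270000x1, and so on; only after every
 * rate/lane combination fails is bpp stepped down by 2 * 3.
 */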
1871 static int
1872 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1873                                   struct intel_crtc_state *pipe_config,
1874                                   const struct link_config_limits *limits)
1875 {
1876         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1877         int bpp, clock, lane_count;
1878         int mode_rate, link_clock, link_avail;
1879
1880         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1881                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1882                                                    bpp);
1883
1884                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1885                         for (lane_count = limits->min_lane_count;
1886                              lane_count <= limits->max_lane_count;
1887                              lane_count <<= 1) {
1888                                 link_clock = intel_dp->common_rates[clock];
1889                                 link_avail = intel_dp_max_data_rate(link_clock,
1890                                                                     lane_count);
1891
1892                                 if (mode_rate <= link_avail) {
1893                                         pipe_config->lane_count = lane_count;
1894                                         pipe_config->pipe_bpp = bpp;
1895                                         pipe_config->port_clock = link_clock;
1896
1897                                         return 0;
1898                                 }
1899                         }
1900                 }
1901         }
1902
1903         return -EINVAL;
1904 }
1905
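/*
 * dsc_bpc[] is filled highest-first by the DRM helper, so the loop
 * below picks the largest sink-supported input bpc that does not
 * exceed dsc_max_bpc; e.g. a sink offering {12, 10, 8} with a limit
 * of 10 yields 10 * 3 = 30 pipe bpp.
 */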
1906 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1907 {
1908         int i, num_bpc;
1909         u8 dsc_bpc[3] = {0};
1910
1911         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1912                                                        dsc_bpc);
1913         for (i = 0; i < num_bpc; i++) {
1914                 if (dsc_max_bpc >= dsc_bpc[i])
1915                         return dsc_bpc[i] * 3;
1916         }
1917
1918         return 0;
1919 }
1920
1921 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1922                                        struct intel_crtc_state *pipe_config,
1923                                        struct drm_connector_state *conn_state,
1924                                        struct link_config_limits *limits)
1925 {
1926         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1927         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1928         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1929         u8 dsc_max_bpc;
1930         int pipe_bpp;
1931         int ret;
1932
1933         pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
1934                 intel_dp_supports_fec(intel_dp, pipe_config);
1935
1936         if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1937                 return -EINVAL;
1938
1939         dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
1940                             conn_state->max_requested_bpc);
1941
1942         pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1943         if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
1944                 DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
1945                 return -EINVAL;
1946         }
1947
1948         /*
1949          * For now enable DSC for max bpp, max link rate, max lane count.
1950          * Optimize this later for the minimum possible link rate/lane count
1951          * with DSC enabled for the requested mode.
1952          */
1953         pipe_config->pipe_bpp = pipe_bpp;
1954         pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1955         pipe_config->lane_count = limits->max_lane_count;
1956
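        /*
         * The sink's maximum DSC output bpp is reported in 1/16 bpp
         * units, hence the >> 4 in both branches below before comparing
         * against the uncompressed pipe_bpp.
         */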
1957         if (intel_dp_is_edp(intel_dp)) {
1958                 pipe_config->dsc_params.compressed_bpp =
1959                         min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1960                               pipe_config->pipe_bpp);
1961                 pipe_config->dsc_params.slice_count =
1962                         drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1963                                                         true);
1964         } else {
1965                 u16 dsc_max_output_bpp;
1966                 u8 dsc_dp_slice_count;
1967
1968                 dsc_max_output_bpp =
1969                         intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
1970                                                     pipe_config->lane_count,
1971                                                     adjusted_mode->crtc_clock,
1972                                                     adjusted_mode->crtc_hdisplay);
1973                 dsc_dp_slice_count =
1974                         intel_dp_dsc_get_slice_count(intel_dp,
1975                                                      adjusted_mode->crtc_clock,
1976                                                      adjusted_mode->crtc_hdisplay);
1977                 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1978                         DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
1979                         return -EINVAL;
1980                 }
1981                 pipe_config->dsc_params.compressed_bpp = min_t(u16,
1982                                                                dsc_max_output_bpp >> 4,
1983                                                                pipe_config->pipe_bpp);
1984                 pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
1985         }
1986         /*
1987          * The VDSC engine operates at 1 pixel per clock, so if the peak
1988          * pixel rate exceeds the maximum CDCLK frequency we must split the
1989          * stream across 2 VDSC instances, which requires a slice count > 1.
1990          */
1991         if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
1992                 if (pipe_config->dsc_params.slice_count > 1) {
1993                         pipe_config->dsc_params.dsc_split = true;
1994                 } else {
1995                         DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
1996                         return -EINVAL;
1997                 }
1998         }
1999
2000         ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
2001         if (ret < 0) {
2002                 DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
2003                               "Compressed BPP = %d\n",
2004                               pipe_config->pipe_bpp,
2005                               pipe_config->dsc_params.compressed_bpp);
2006                 return ret;
2007         }
2008
2009         pipe_config->dsc_params.compression_enable = true;
2010         DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
2011                       "Compressed Bpp = %d Slice Count = %d\n",
2012                       pipe_config->pipe_bpp,
2013                       pipe_config->dsc_params.compressed_bpp,
2014                       pipe_config->dsc_params.slice_count);
2015
2016         return 0;
2017 }
2018
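/*
 * Minimum acceptable bpp: 6 bpc (18 bpp) for RGB, while YCbCr output
 * formats need at least 8 bpc (24 bpp).
 */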
2019 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2020 {
2021         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2022                 return 6 * 3;
2023         else
2024                 return 8 * 3;
2025 }
2026
2027 static int
2028 intel_dp_compute_link_config(struct intel_encoder *encoder,
2029                              struct intel_crtc_state *pipe_config,
2030                              struct drm_connector_state *conn_state)
2031 {
2032         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2033         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2034         struct link_config_limits limits;
2035         int common_len;
2036         int ret;
2037
2038         common_len = intel_dp_common_len_rate_limit(intel_dp,
2039                                                     intel_dp->max_link_rate);
2040
2041         /* No common link rates between source and sink */
2042         WARN_ON(common_len <= 0);
2043
2044         limits.min_clock = 0;
2045         limits.max_clock = common_len - 1;
2046
2047         limits.min_lane_count = 1;
2048         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2049
2050         limits.min_bpp = intel_dp_min_bpp(pipe_config);
2051         limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2052
2053         if (intel_dp_is_edp(intel_dp)) {
2054                 /*
2055                  * Use the maximum clock and number of lanes the eDP panel
2056                  * advertises being capable of. The panels are generally
2057                  * designed to support only a single clock and lane
2058                  * configuration, and typically these values correspond to the
2059                  * native resolution of the panel.
2060                  */
2061                 limits.min_lane_count = limits.max_lane_count;
2062                 limits.min_clock = limits.max_clock;
2063         }
2064
2065         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2066
2067         DRM_DEBUG_KMS("DP link computation with max lane count %i "
2068                       "max rate %d max bpp %d pixel clock %iKHz\n",
2069                       limits.max_lane_count,
2070                       intel_dp->common_rates[limits.max_clock],
2071                       limits.max_bpp, adjusted_mode->crtc_clock);
2072
2073         /*
2074          * Optimize for slow and wide. This is the place to add alternative
2075          * optimization policy.
2076          */
2077         ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2078
2079         /* Enable compression if the mode doesn't fit the available bandwidth */
2080         DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2081         if (ret || intel_dp->force_dsc_en) {
2082                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2083                                                   conn_state, &limits);
2084                 if (ret < 0)
2085                         return ret;
2086         }
2087
2088         if (pipe_config->dsc_params.compression_enable) {
2089                 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2090                               pipe_config->lane_count, pipe_config->port_clock,
2091                               pipe_config->pipe_bpp,
2092                               pipe_config->dsc_params.compressed_bpp);
2093
2094                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2095                               intel_dp_link_required(adjusted_mode->crtc_clock,
2096                                                      pipe_config->dsc_params.compressed_bpp),
2097                               intel_dp_max_data_rate(pipe_config->port_clock,
2098                                                      pipe_config->lane_count));
2099         } else {
2100                 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2101                               pipe_config->lane_count, pipe_config->port_clock,
2102                               pipe_config->pipe_bpp);
2103
2104                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2105                               intel_dp_link_required(adjusted_mode->crtc_clock,
2106                                                      pipe_config->pipe_bpp),
2107                               intel_dp_max_data_rate(pipe_config->port_clock,
2108                                                      pipe_config->lane_count));
2109         }
2110         return 0;
2111 }
2112
2113 static int
2114 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2115                          struct drm_connector *connector,
2116                          struct intel_crtc_state *crtc_state)
2117 {
2118         const struct drm_display_info *info = &connector->display_info;
2119         const struct drm_display_mode *adjusted_mode =
2120                 &crtc_state->base.adjusted_mode;
2121         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2122         int ret;
2123
2124         if (!drm_mode_is_420_only(info, adjusted_mode) ||
2125             !intel_dp_get_colorimetry_status(intel_dp) ||
2126             !connector->ycbcr_420_allowed)
2127                 return 0;
2128
2129         crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2130
2131         /* YCBCR 420 output conversion needs a scaler */
2132         ret = skl_update_scaler_crtc(crtc_state);
2133         if (ret) {
2134                 DRM_DEBUG_KMS("Scaler allocation for output failed\n");
2135                 return ret;
2136         }
2137
2138         intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
2139
2140         return 0;
2141 }
2142
2143 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2144                                   const struct drm_connector_state *conn_state)
2145 {
2146         const struct intel_digital_connector_state *intel_conn_state =
2147                 to_intel_digital_connector_state(conn_state);
2148         const struct drm_display_mode *adjusted_mode =
2149                 &crtc_state->base.adjusted_mode;
2150
2151         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2152                 /*
2153                  * See:
2154                  * CEA-861-E - 5.1 Default Encoding Parameters
2155                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2156                  */
2157                 return crtc_state->pipe_bpp != 18 &&
2158                         drm_default_rgb_quant_range(adjusted_mode) ==
2159                         HDMI_QUANTIZATION_RANGE_LIMITED;
2160         } else {
2161                 return intel_conn_state->broadcast_rgb ==
2162                         INTEL_BROADCAST_RGB_LIMITED;
2163         }
2164 }
2165
2166 int
2167 intel_dp_compute_config(struct intel_encoder *encoder,
2168                         struct intel_crtc_state *pipe_config,
2169                         struct drm_connector_state *conn_state)
2170 {
2171         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2172         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2173         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2174         struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2175         enum port port = encoder->port;
2176         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2177         struct intel_connector *intel_connector = intel_dp->attached_connector;
2178         struct intel_digital_connector_state *intel_conn_state =
2179                 to_intel_digital_connector_state(conn_state);
2180         bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2181                                            DP_DPCD_QUIRK_CONSTANT_N);
2182         int ret = 0, output_bpp;
2183
2184         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2185                 pipe_config->has_pch_encoder = true;
2186
2187         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2188         if (lspcon->active)
2189                 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2190         else
2191                 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2192                                                pipe_config);
2193
2194         if (ret)
2195                 return ret;
2196
2197         pipe_config->has_drrs = false;
2198         if (IS_G4X(dev_priv) || port == PORT_A)
2199                 pipe_config->has_audio = false;
2200         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2201                 pipe_config->has_audio = intel_dp->has_audio;
2202         else
2203                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2204
2205         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2206                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2207                                        adjusted_mode);
2208
2209                 if (INTEL_GEN(dev_priv) >= 9) {
2210                         ret = skl_update_scaler_crtc(pipe_config);
2211                         if (ret)
2212                                 return ret;
2213                 }
2214
2215                 if (HAS_GMCH(dev_priv))
2216                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
2217                                                  conn_state->scaling_mode);
2218                 else
2219                         intel_pch_panel_fitting(intel_crtc, pipe_config,
2220                                                 conn_state->scaling_mode);
2221         }
2222
2223         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2224                 return -EINVAL;
2225
2226         if (HAS_GMCH(dev_priv) &&
2227             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2228                 return -EINVAL;
2229
2230         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2231                 return -EINVAL;
2232
2233         ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2234         if (ret < 0)
2235                 return ret;
2236
2237         pipe_config->limited_color_range =
2238                 intel_dp_limited_color_range(pipe_config, conn_state);
2239
2240         if (pipe_config->dsc_params.compression_enable)
2241                 output_bpp = pipe_config->dsc_params.compressed_bpp;
2242         else
2243                 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2244
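        /*
         * The M/N values describe the stream-to-link clock ratio for the
         * hardware: roughly data M/N = (bpp * pixel clock) /
         * (8 * lane count * link clock) and link M/N = pixel clock /
         * link clock.
         */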
2245         intel_link_compute_m_n(output_bpp,
2246                                pipe_config->lane_count,
2247                                adjusted_mode->crtc_clock,
2248                                pipe_config->port_clock,
2249                                &pipe_config->dp_m_n,
2250                                constant_n);
2251
2252         if (intel_connector->panel.downclock_mode &&
2253             dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2254                 pipe_config->has_drrs = true;
2255                 intel_link_compute_m_n(output_bpp,
2256                                        pipe_config->lane_count,
2257                                        intel_connector->panel.downclock_mode->clock,
2258                                        pipe_config->port_clock,
2259                                        &pipe_config->dp_m2_n2,
2260                                        constant_n);
2261         }
2262
2263         if (!HAS_DDI(dev_priv))
2264                 intel_dp_set_clock(encoder, pipe_config);
2265
2266         intel_psr_compute_config(intel_dp, pipe_config);
2267
2268         return 0;
2269 }
2270
2271 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2272                               int link_rate, u8 lane_count,
2273                               bool link_mst)
2274 {
2275         intel_dp->link_trained = false;
2276         intel_dp->link_rate = link_rate;
2277         intel_dp->lane_count = lane_count;
2278         intel_dp->link_mst = link_mst;
2279 }
2280
2281 static void intel_dp_prepare(struct intel_encoder *encoder,
2282                              const struct intel_crtc_state *pipe_config)
2283 {
2284         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2285         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2286         enum port port = encoder->port;
2287         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2288         const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2289
2290         intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2291                                  pipe_config->lane_count,
2292                                  intel_crtc_has_type(pipe_config,
2293                                                      INTEL_OUTPUT_DP_MST));
2294
2295         /*
2296          * There are four kinds of DP registers:
2297          *
2298          *      IBX PCH
2299          *      SNB CPU
2300          *      IVB CPU
2301          *      CPT PCH
2302          *
2303          * IBX PCH and CPU are the same for almost everything,
2304          * except that the CPU DP PLL is configured in this
2305          * register
2306          *
2307          * CPT PCH is quite different, having many bits moved
2308          * to the TRANS_DP_CTL register instead. That
2309          * configuration happens (oddly) in ironlake_pch_enable
2310          */
2311
2312         /* Preserve the BIOS-computed detected bit. This is
2313          * supposed to be read-only.
2314          */
2315         intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2316
2317         /* Handle DP bits in common between all three register formats */
2318         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2319         intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2320
2321         /* Split out the IBX/CPU vs CPT settings */
2322
2323         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2324                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2325                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2326                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2327                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2328                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2329
2330                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2331                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2332
2333                 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2334         } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2335                 u32 trans_dp;
2336
2337                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2338
2339                 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2340                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2341                         trans_dp |= TRANS_DP_ENH_FRAMING;
2342                 else
2343                         trans_dp &= ~TRANS_DP_ENH_FRAMING;
2344                 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2345         } else {
2346                 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2347                         intel_dp->DP |= DP_COLOR_RANGE_16_235;
2348
2349                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2350                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2351                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2352                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2353                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2354
2355                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2356                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2357
2358                 if (IS_CHERRYVIEW(dev_priv))
2359                         intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2360                 else
2361                         intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2362         }
2363 }
2364
2365 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
2366 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
2367
2368 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
2369 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
2370
2371 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2372 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
2373
2374 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2375
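/*
 * Each IDLE_*_MASK/VALUE pair above is consumed here: we poll PP_STATUS
 * until (status & mask) == value, e.g. "on idle" means PP_ON set, no
 * power sequence in progress and the sequencer state machine idle-on.
 */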
2376 static void wait_panel_status(struct intel_dp *intel_dp,
2377                               u32 mask,
2378                               u32 value)
2379 {
2380         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2381         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2382
2383         lockdep_assert_held(&dev_priv->pps_mutex);
2384
2385         intel_pps_verify_state(intel_dp);
2386
2387         pp_stat_reg = _pp_stat_reg(intel_dp);
2388         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2389
2390         DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2391                         mask, value,
2392                         I915_READ(pp_stat_reg),
2393                         I915_READ(pp_ctrl_reg));
2394
2395         if (intel_wait_for_register(&dev_priv->uncore,
2396                                     pp_stat_reg, mask, value,
2397                                     5000))
2398                 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2399                                 I915_READ(pp_stat_reg),
2400                                 I915_READ(pp_ctrl_reg));
2401
2402         DRM_DEBUG_KMS("Wait complete\n");
2403 }
2404
2405 static void wait_panel_on(struct intel_dp *intel_dp)
2406 {
2407         DRM_DEBUG_KMS("Wait for panel power on\n");
2408         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2409 }
2410
2411 static void wait_panel_off(struct intel_dp *intel_dp)
2412 {
2413         DRM_DEBUG_KMS("Wait for panel power off time\n");
2414         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2415 }
2416
2417 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2418 {
2419         ktime_t panel_power_on_time;
2420         s64 panel_power_off_duration;
2421
2422         DRM_DEBUG_KMS("Wait for panel power cycle\n");
2423
2424         /* Take the difference of the current time and the panel power off
2425          * time and then make the panel wait for t11_t12 if needed. */
2426         panel_power_on_time = ktime_get_boottime();
2427         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2428
2429         /* When the VDD override bit is the last thing we disabled, we have
2430          * to do this wait manually. */
2431         if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2432                 wait_remaining_ms_from_jiffies(jiffies,
2433                                        intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2434
2435         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2436 }
2437
2438 static void wait_backlight_on(struct intel_dp *intel_dp)
2439 {
2440         wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2441                                        intel_dp->backlight_on_delay);
2442 }
2443
2444 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2445 {
2446         wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2447                                        intel_dp->backlight_off_delay);
2448 }
2449
2450 /*
2451  * Read the current pp_control value, unlocking the register if it
2452  * is locked.
2453  */
2454 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2455 {
2456         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2457         u32 control;
2458
2459         lockdep_assert_held(&dev_priv->pps_mutex);
2460
2461         control = I915_READ(_pp_ctrl_reg(intel_dp));
2462         if (WARN_ON(!HAS_DDI(dev_priv) &&
2463                     (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2464                 control &= ~PANEL_UNLOCK_MASK;
2465                 control |= PANEL_UNLOCK_REGS;
2466         }
2467         return control;
2468 }
2469
2470 /*
2471  * Must be paired with edp_panel_vdd_off().
2472  * Must hold pps_mutex around the whole on/off sequence.
2473  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2474  */
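/*
 * Typical usage (see intel_dp_aux_xfer() earlier in this file):
 *
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... AUX transfers ...
 *	if (vdd)
 *		edp_panel_vdd_off(intel_dp, false);
 */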
2475 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2476 {
2477         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2478         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2479         u32 pp;
2480         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2481         bool need_to_disable = !intel_dp->want_panel_vdd;
2482
2483         lockdep_assert_held(&dev_priv->pps_mutex);
2484
2485         if (!intel_dp_is_edp(intel_dp))
2486                 return false;
2487
2488         cancel_delayed_work(&intel_dp->panel_vdd_work);
2489         intel_dp->want_panel_vdd = true;
2490
2491         if (edp_have_panel_vdd(intel_dp))
2492                 return need_to_disable;
2493
2494         intel_display_power_get(dev_priv,
2495                                 intel_aux_power_domain(intel_dig_port));
2496
2497         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2498                       port_name(intel_dig_port->base.port));
2499
2500         if (!edp_have_panel_power(intel_dp))
2501                 wait_panel_power_cycle(intel_dp);
2502
2503         pp = ironlake_get_pp_control(intel_dp);
2504         pp |= EDP_FORCE_VDD;
2505
2506         pp_stat_reg = _pp_stat_reg(intel_dp);
2507         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2508
2509         I915_WRITE(pp_ctrl_reg, pp);
2510         POSTING_READ(pp_ctrl_reg);
2511         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2512                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2513         /*
2514          * If the panel wasn't on, delay before accessing aux channel
2515          */
2516         if (!edp_have_panel_power(intel_dp)) {
2517                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2518                               port_name(intel_dig_port->base.port));
2519                 msleep(intel_dp->panel_power_up_delay);
2520         }
2521
2522         return need_to_disable;
2523 }
2524
2525 /*
2526  * Must be paired with intel_edp_panel_vdd_off() or
2527  * intel_edp_panel_off().
2528  * Nested calls to these functions are not allowed since
2529  * we drop the lock. Caller must use some higher level
2530  * locking to prevent nested calls from other threads.
2531  */
2532 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2533 {
2534         intel_wakeref_t wakeref;
2535         bool vdd;
2536
2537         if (!intel_dp_is_edp(intel_dp))
2538                 return;
2539
2540         vdd = false;
2541         with_pps_lock(intel_dp, wakeref)
2542                 vdd = edp_panel_vdd_on(intel_dp);
2543         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2544              port_name(dp_to_dig_port(intel_dp)->base.port));
2545 }
2546
2547 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2548 {
2549         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2550         struct intel_digital_port *intel_dig_port =
2551                 dp_to_dig_port(intel_dp);
2552         u32 pp;
2553         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2554
2555         lockdep_assert_held(&dev_priv->pps_mutex);
2556
2557         WARN_ON(intel_dp->want_panel_vdd);
2558
2559         if (!edp_have_panel_vdd(intel_dp))
2560                 return;
2561
2562         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2563                       port_name(intel_dig_port->base.port));
2564
2565         pp = ironlake_get_pp_control(intel_dp);
2566         pp &= ~EDP_FORCE_VDD;
2567
2568         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2569         pp_stat_reg = _pp_stat_reg(intel_dp);
2570
2571         I915_WRITE(pp_ctrl_reg, pp);
2572         POSTING_READ(pp_ctrl_reg);
2573
2574         /* Make sure sequencer is idle before allowing subsequent activity */
2575         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2576                       I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2577
2578         if ((pp & PANEL_POWER_ON) == 0)
2579                 intel_dp->panel_power_off_time = ktime_get_boottime();
2580
2581         intel_display_power_put_unchecked(dev_priv,
2582                                           intel_aux_power_domain(intel_dig_port));
2583 }
2584
2585 static void edp_panel_vdd_work(struct work_struct *__work)
2586 {
2587         struct intel_dp *intel_dp =
2588                 container_of(to_delayed_work(__work),
2589                              struct intel_dp, panel_vdd_work);
2590         intel_wakeref_t wakeref;
2591
2592         with_pps_lock(intel_dp, wakeref) {
2593                 if (!intel_dp->want_panel_vdd)
2594                         edp_panel_vdd_off_sync(intel_dp);
2595         }
2596 }
2597
2598 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2599 {
2600         unsigned long delay;
2601
2602         /*
2603          * Queue the timer to fire a long time from now (relative to the power
2604          * down delay) to keep the panel power up across a sequence of
2605          * operations.
2606          */
2607         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2608         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2609 }
2610
2611 /*
2612  * Must be paired with edp_panel_vdd_on().
2613  * Must hold pps_mutex around the whole on/off sequence.
2614  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2615  */
2616 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2617 {
2618         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2619
2620         lockdep_assert_held(&dev_priv->pps_mutex);
2621
2622         if (!intel_dp_is_edp(intel_dp))
2623                 return;
2624
2625         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2626              port_name(dp_to_dig_port(intel_dp)->base.port));
2627
2628         intel_dp->want_panel_vdd = false;
2629
2630         if (sync)
2631                 edp_panel_vdd_off_sync(intel_dp);
2632         else
2633                 edp_panel_vdd_schedule_off(intel_dp);
2634 }
2635
2636 static void edp_panel_on(struct intel_dp *intel_dp)
2637 {
2638         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2639         u32 pp;
2640         i915_reg_t pp_ctrl_reg;
2641
2642         lockdep_assert_held(&dev_priv->pps_mutex);
2643
2644         if (!intel_dp_is_edp(intel_dp))
2645                 return;
2646
2647         DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2648                       port_name(dp_to_dig_port(intel_dp)->base.port));
2649
2650         if (WARN(edp_have_panel_power(intel_dp),
2651                  "eDP port %c panel power already on\n",
2652                  port_name(dp_to_dig_port(intel_dp)->base.port)))
2653                 return;
2654
2655         wait_panel_power_cycle(intel_dp);
2656
2657         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2658         pp = ironlake_get_pp_control(intel_dp);
2659         if (IS_GEN(dev_priv, 5)) {
2660                 /* ILK workaround: disable reset around power sequence */
2661                 pp &= ~PANEL_POWER_RESET;
2662                 I915_WRITE(pp_ctrl_reg, pp);
2663                 POSTING_READ(pp_ctrl_reg);
2664         }
2665
2666         pp |= PANEL_POWER_ON;
2667         if (!IS_GEN(dev_priv, 5))
2668                 pp |= PANEL_POWER_RESET;
2669
2670         I915_WRITE(pp_ctrl_reg, pp);
2671         POSTING_READ(pp_ctrl_reg);
2672
2673         wait_panel_on(intel_dp);
2674         intel_dp->last_power_on = jiffies;
2675
2676         if (IS_GEN(dev_priv, 5)) {
2677                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2678                 I915_WRITE(pp_ctrl_reg, pp);
2679                 POSTING_READ(pp_ctrl_reg);
2680         }
2681 }
2682
2683 void intel_edp_panel_on(struct intel_dp *intel_dp)
2684 {
2685         intel_wakeref_t wakeref;
2686
2687         if (!intel_dp_is_edp(intel_dp))
2688                 return;
2689
2690         with_pps_lock(intel_dp, wakeref)
2691                 edp_panel_on(intel_dp);
2692 }
2693
2695 static void edp_panel_off(struct intel_dp *intel_dp)
2696 {
2697         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2698         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2699         u32 pp;
2700         i915_reg_t pp_ctrl_reg;
2701
2702         lockdep_assert_held(&dev_priv->pps_mutex);
2703
2704         if (!intel_dp_is_edp(intel_dp))
2705                 return;
2706
2707         DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2708                       port_name(dig_port->base.port));
2709
2710         WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2711              port_name(dig_port->base.port));
2712
2713         pp = ironlake_get_pp_control(intel_dp);
2714         /* We need to switch off panel power _and_ force vdd, as otherwise
2715          * some panels get very unhappy and cease to work. */
2716         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2717                 EDP_BLC_ENABLE);
2718
2719         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2720
2721         intel_dp->want_panel_vdd = false;
2722
2723         I915_WRITE(pp_ctrl_reg, pp);
2724         POSTING_READ(pp_ctrl_reg);
2725
2726         wait_panel_off(intel_dp);
2727         intel_dp->panel_power_off_time = ktime_get_boottime();
2728
2729         /* We got a reference when we enabled the VDD. */
2730         intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2731 }
2732
2733 void intel_edp_panel_off(struct intel_dp *intel_dp)
2734 {
2735         intel_wakeref_t wakeref;
2736
2737         if (!intel_dp_is_edp(intel_dp))
2738                 return;
2739
2740         with_pps_lock(intel_dp, wakeref)
2741                 edp_panel_off(intel_dp);
2742 }
2743
2744 /* Enable backlight in the panel power control. */
2745 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2746 {
2747         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2748         intel_wakeref_t wakeref;
2749
2750         /*
2751          * If we enable the backlight right away following a panel power
2752          * on, we may see slight flicker as the panel syncs with the eDP
2753          * link.  So delay a bit to make sure the image is solid before
2754          * allowing it to appear.
2755          */
2756         wait_backlight_on(intel_dp);
2757
2758         with_pps_lock(intel_dp, wakeref) {
2759                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2760                 u32 pp;
2761
2762                 pp = ironlake_get_pp_control(intel_dp);
2763                 pp |= EDP_BLC_ENABLE;
2764
2765                 I915_WRITE(pp_ctrl_reg, pp);
2766                 POSTING_READ(pp_ctrl_reg);
2767         }
2768 }
2769
2770 /* Enable backlight PWM and backlight PP control. */
2771 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2772                             const struct drm_connector_state *conn_state)
2773 {
2774         struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2775
2776         if (!intel_dp_is_edp(intel_dp))
2777                 return;
2778
2779         DRM_DEBUG_KMS("\n");
2780
2781         intel_panel_enable_backlight(crtc_state, conn_state);
2782         _intel_edp_backlight_on(intel_dp);
2783 }
2784
2785 /* Disable backlight in the panel power control. */
2786 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2787 {
2788         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2789         intel_wakeref_t wakeref;
2790
2791         if (!intel_dp_is_edp(intel_dp))
2792                 return;
2793
2794         with_pps_lock(intel_dp, wakeref) {
2795                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2796                 u32 pp;
2797
2798                 pp = ironlake_get_pp_control(intel_dp);
2799                 pp &= ~EDP_BLC_ENABLE;
2800
2801                 I915_WRITE(pp_ctrl_reg, pp);
2802                 POSTING_READ(pp_ctrl_reg);
2803         }
2804
2805         intel_dp->last_backlight_off = jiffies;
2806         edp_wait_backlight_off(intel_dp);
2807 }
2808
2809 /* Disable backlight PP control and backlight PWM. */
2810 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2811 {
2812         struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2813
2814         if (!intel_dp_is_edp(intel_dp))
2815                 return;
2816
2817         DRM_DEBUG_KMS("\n");
2818
2819         _intel_edp_backlight_off(intel_dp);
2820         intel_panel_disable_backlight(old_conn_state);
2821 }
2822
2823 /*
2824  * Hook for controlling the panel power control backlight through the bl_power
2825  * sysfs attribute. Take care to handle multiple calls.
2826  */
2827 static void intel_edp_backlight_power(struct intel_connector *connector,
2828                                       bool enable)
2829 {
2830         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2831         intel_wakeref_t wakeref;
2832         bool is_enabled;
2833
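        /*
         * Sample the current backlight state under the pps lock so that
         * repeated calls with an unchanged state remain no-ops.
         */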
2834         is_enabled = false;
2835         with_pps_lock(intel_dp, wakeref)
2836                 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2837         if (is_enabled == enable)
2838                 return;
2839
2840         DRM_DEBUG_KMS("panel power control backlight %s\n",
2841                       enable ? "enable" : "disable");
2842
2843         if (enable)
2844                 _intel_edp_backlight_on(intel_dp);
2845         else
2846                 _intel_edp_backlight_off(intel_dp);
2847 }
2848
2849 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2850 {
2851         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2852         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2853         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2854
2855         I915_STATE_WARN(cur_state != state,
2856                         "DP port %c state assertion failure (expected %s, current %s)\n",
2857                         port_name(dig_port->base.port),
2858                         onoff(state), onoff(cur_state));
2859 }
2860 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2861
2862 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2863 {
2864         bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2865
2866         I915_STATE_WARN(cur_state != state,
2867                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2868                         onoff(state), onoff(cur_state));
2869 }
2870 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2871 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2872
2873 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2874                                 const struct intel_crtc_state *pipe_config)
2875 {
2876         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2877         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2878
2879         assert_pipe_disabled(dev_priv, crtc->pipe);
2880         assert_dp_port_disabled(intel_dp);
2881         assert_edp_pll_disabled(dev_priv);
2882
2883         DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2884                       pipe_config->port_clock);
2885
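        /* Select the PLL frequency before enabling the PLL itself. */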
2886         intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2887
2888         if (pipe_config->port_clock == 162000)
2889                 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2890         else
2891                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2892
2893         I915_WRITE(DP_A, intel_dp->DP);
2894         POSTING_READ(DP_A);
2895         udelay(500);
2896
2897         /*
2898          * [DevILK] Workaround required when enabling DP PLL
2899          * while a pipe is enabled going to FDI:
2900          * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2901          * 2. Program DP PLL enable
2902          */
2903         if (IS_GEN(dev_priv, 5))
2904                 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2905
2906         intel_dp->DP |= DP_PLL_ENABLE;
2907
2908         I915_WRITE(DP_A, intel_dp->DP);
2909         POSTING_READ(DP_A);
2910         udelay(200);
2911 }
2912
2913 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
2914                                  const struct intel_crtc_state *old_crtc_state)
2915 {
2916         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2917         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2918
2919         assert_pipe_disabled(dev_priv, crtc->pipe);
2920         assert_dp_port_disabled(intel_dp);
2921         assert_edp_pll_enabled(dev_priv);
2922
2923         DRM_DEBUG_KMS("disabling eDP PLL\n");
2924
2925         intel_dp->DP &= ~DP_PLL_ENABLE;
2926
2927         I915_WRITE(DP_A, intel_dp->DP);
2928         POSTING_READ(DP_A);
2929         udelay(200);
2930 }
2931
2932 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2933 {
2934         /*
2935          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2936          * be capable of signalling downstream hpd with a long pulse.
2937          * Whether or not that means D3 is safe to use is not clear,
2938          * but let's assume so until proven otherwise.
2939          *
2940          * FIXME should really check all downstream ports...
2941          */
2942         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2943                 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2944                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2945 }
2946
2947 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2948                                            const struct intel_crtc_state *crtc_state,
2949                                            bool enable)
2950 {
2951         int ret;
2952
2953         if (!crtc_state->dsc_params.compression_enable)
2954                 return;
2955
2956         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2957                                  enable ? DP_DECOMPRESSION_EN : 0);
2958         if (ret < 0)
2959                 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2960                               enable ? "enable" : "disable");
2961 }
2962
2963 /* If the sink supports it, try to set the power state appropriately */
2964 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2965 {
2966         int ret, i;
2967
2968         /* Should have a valid DPCD by this point */
2969         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2970                 return;
2971
2972         if (mode != DRM_MODE_DPMS_ON) {
2973                 if (downstream_hpd_needs_d0(intel_dp))
2974                         return;
2975
2976                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2977                                          DP_SET_POWER_D3);
2978         } else {
2979                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2980
2981                 /*
2982                  * When turning on, retry a few times with a 1 ms sleep in
2983                  * between to give the sink time to wake up.
2984                  */
2985                 for (i = 0; i < 3; i++) {
2986                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2987                                                  DP_SET_POWER_D0);
2988                         if (ret == 1)
2989                                 break;
2990                         msleep(1);
2991                 }
2992
2993                 if (ret == 1 && lspcon->active)
2994                         lspcon_wait_pcon_mode(lspcon);
2995         }
2996
2997         if (ret != 1)
2998                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2999                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3000 }
3001
3002 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3003                                  enum port port, enum pipe *pipe)
3004 {
3005         enum pipe p;
3006
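        /* Scan the transcoder DP control registers for a matching port select. */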
3007         for_each_pipe(dev_priv, p) {
3008                 u32 val = I915_READ(TRANS_DP_CTL(p));
3009
3010                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3011                         *pipe = p;
3012                         return true;
3013                 }
3014         }
3015
3016         DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3017
3018         /* must initialize pipe to something for the asserts */
3019         *pipe = PIPE_A;
3020
3021         return false;
3022 }
3023
3024 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3025                            i915_reg_t dp_reg, enum port port,
3026                            enum pipe *pipe)
3027 {
3028         bool ret;
3029         u32 val;
3030
3031         val = I915_READ(dp_reg);
3032
3033         ret = val & DP_PORT_EN;
3034
3035         /* asserts want to know the pipe even if the port is disabled */
3036         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3037                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3038         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3039                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3040         else if (IS_CHERRYVIEW(dev_priv))
3041                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3042         else
3043                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3044
3045         return ret;
3046 }
3047
3048 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3049                                   enum pipe *pipe)
3050 {
3051         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3052         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3053         intel_wakeref_t wakeref;
3054         bool ret;
3055
3056         wakeref = intel_display_power_get_if_enabled(dev_priv,
3057                                                      encoder->power_domain);
3058         if (!wakeref)
3059                 return false;
3060
3061         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3062                                     encoder->port, pipe);
3063
3064         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3065
3066         return ret;
3067 }
3068
3069 static void intel_dp_get_config(struct intel_encoder *encoder,
3070                                 struct intel_crtc_state *pipe_config)
3071 {
3072         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3073         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3074         u32 tmp, flags = 0;
3075         enum port port = encoder->port;
3076         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3077
3078         if (encoder->type == INTEL_OUTPUT_EDP)
3079                 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3080         else
3081                 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3082
3083         tmp = I915_READ(intel_dp->output_reg);
3084
3085         pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3086
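        /*
         * On CPT PCH ports the sync polarity lives in the transcoder DP
         * control register; elsewhere it is in the port register itself.
         */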
3087         if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3088                 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3089
3090                 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3091                         flags |= DRM_MODE_FLAG_PHSYNC;
3092                 else
3093                         flags |= DRM_MODE_FLAG_NHSYNC;
3094
3095                 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3096                         flags |= DRM_MODE_FLAG_PVSYNC;
3097                 else
3098                         flags |= DRM_MODE_FLAG_NVSYNC;
3099         } else {
3100                 if (tmp & DP_SYNC_HS_HIGH)
3101                         flags |= DRM_MODE_FLAG_PHSYNC;
3102                 else
3103                         flags |= DRM_MODE_FLAG_NHSYNC;
3104
3105                 if (tmp & DP_SYNC_VS_HIGH)
3106                         flags |= DRM_MODE_FLAG_PVSYNC;
3107                 else
3108                         flags |= DRM_MODE_FLAG_NVSYNC;
3109         }
3110
3111         pipe_config->base.adjusted_mode.flags |= flags;
3112
3113         if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3114                 pipe_config->limited_color_range = true;
3115
3116         pipe_config->lane_count =
3117                 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3118
3119         intel_dp_get_m_n(crtc, pipe_config);
3120
3121         if (port == PORT_A) {
3122                 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3123                         pipe_config->port_clock = 162000;
3124                 else
3125                         pipe_config->port_clock = 270000;
3126         }
3127
3128         pipe_config->base.adjusted_mode.crtc_clock =
3129                 intel_dotclock_calculate(pipe_config->port_clock,
3130                                          &pipe_config->dp_m_n);
3131
3132         if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3133             pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3134                 /*
3135                  * This is a big fat ugly hack.
3136                  *
3137                  * Some machines in UEFI boot mode provide us a VBT that has 18
3138                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3139                  * unknown we fail to light up. Yet the same BIOS boots up with
3140                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3141                  * max, not what it tells us to use.
3142                  *
3143                  * Note: This will still be broken if the eDP panel is not lit
3144                  * up by the BIOS, and thus we can't get the mode at module
3145                  * load.
3146                  */
3147                 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3148                               pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3149                 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3150         }
3151 }
3152
3153 static void intel_disable_dp(struct intel_encoder *encoder,
3154                              const struct intel_crtc_state *old_crtc_state,
3155                              const struct drm_connector_state *old_conn_state)
3156 {
3157         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3158
3159         intel_dp->link_trained = false;
3160
3161         if (old_crtc_state->has_audio)
3162                 intel_audio_codec_disable(encoder,
3163                                           old_crtc_state, old_conn_state);
3164
3165         /* Make sure the panel is off before trying to change the mode. But also
3166          * ensure that we have vdd while we switch off the panel. */
3167         intel_edp_panel_vdd_on(intel_dp);
3168         intel_edp_backlight_off(old_conn_state);
3169         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3170         intel_edp_panel_off(intel_dp);
3171 }
3172
3173 static void g4x_disable_dp(struct intel_encoder *encoder,
3174                            const struct intel_crtc_state *old_crtc_state,
3175                            const struct drm_connector_state *old_conn_state)
3176 {
3177         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3178 }
3179
3180 static void vlv_disable_dp(struct intel_encoder *encoder,
3181                            const struct intel_crtc_state *old_crtc_state,
3182                            const struct drm_connector_state *old_conn_state)
3183 {
3184         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3185 }
3186
3187 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3188                                 const struct intel_crtc_state *old_crtc_state,
3189                                 const struct drm_connector_state *old_conn_state)
3190 {
3191         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3192         enum port port = encoder->port;
3193
3194         /*
3195          * Bspec does not list a specific disable sequence for g4x DP.
3196          * Follow the ilk+ sequence (disable pipe before the port) for
3197          * g4x DP as it does not suffer from underruns like the normal
3198          * g4x modeset sequence (disable pipe after the port).
3199          */
3200         intel_dp_link_down(encoder, old_crtc_state);
3201
3202         /* Only ilk+ has port A */
3203         if (port == PORT_A)
3204                 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3205 }
3206
3207 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3208                                 const struct intel_crtc_state *old_crtc_state,
3209                                 const struct drm_connector_state *old_conn_state)
3210 {
3211         intel_dp_link_down(encoder, old_crtc_state);
3212 }
3213
3214 static void chv_post_disable_dp(struct intel_encoder *encoder,
3215                                 const struct intel_crtc_state *old_crtc_state,
3216                                 const struct drm_connector_state *old_conn_state)
3217 {
3218         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3219
3220         intel_dp_link_down(encoder, old_crtc_state);
3221
3222         vlv_dpio_get(dev_priv);
3223
3224         /* Assert data lane reset */
3225         chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3226
3227         vlv_dpio_put(dev_priv);
3228 }
3229
3230 static void
3231 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3232                          u32 *DP,
3233                          u8 dp_train_pat)
3234 {
3235         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3236         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3237         enum port port = intel_dig_port->base.port;
3238         u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3239
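        /*
         * The set of valid training pattern bits depends on the sink:
         * drm_dp_training_pattern_mask() only includes TPS4 for DPCD 1.4+.
         */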
3240         if (dp_train_pat & train_pat_mask)
3241                 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3242                               dp_train_pat & train_pat_mask);
3243
3244         if (HAS_DDI(dev_priv)) {
3245                 u32 temp = I915_READ(DP_TP_CTL(port));
3246
3247                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3248                         temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3249                 else
3250                         temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3251
3252                 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3253                 switch (dp_train_pat & train_pat_mask) {
3254                 case DP_TRAINING_PATTERN_DISABLE:
3255                         temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3256
3257                         break;
3258                 case DP_TRAINING_PATTERN_1:
3259                         temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3260                         break;
3261                 case DP_TRAINING_PATTERN_2:
3262                         temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3263                         break;
3264                 case DP_TRAINING_PATTERN_3:
3265                         temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3266                         break;
3267                 case DP_TRAINING_PATTERN_4:
3268                         temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3269                         break;
3270                 }
3271                 I915_WRITE(DP_TP_CTL(port), temp);
3272
3273         } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3274                    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3275                 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3276
3277                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3278                 case DP_TRAINING_PATTERN_DISABLE:
3279                         *DP |= DP_LINK_TRAIN_OFF_CPT;
3280                         break;
3281                 case DP_TRAINING_PATTERN_1:
3282                         *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3283                         break;
3284                 case DP_TRAINING_PATTERN_2:
3285                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3286                         break;
3287                 case DP_TRAINING_PATTERN_3:
3288                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3289                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3290                         break;
3291                 }
3292
3293         } else {
3294                 *DP &= ~DP_LINK_TRAIN_MASK;
3295
3296                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3297                 case DP_TRAINING_PATTERN_DISABLE:
3298                         *DP |= DP_LINK_TRAIN_OFF;
3299                         break;
3300                 case DP_TRAINING_PATTERN_1:
3301                         *DP |= DP_LINK_TRAIN_PAT_1;
3302                         break;
3303                 case DP_TRAINING_PATTERN_2:
3304                         *DP |= DP_LINK_TRAIN_PAT_2;
3305                         break;
3306                 case DP_TRAINING_PATTERN_3:
3307                         DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3308                         *DP |= DP_LINK_TRAIN_PAT_2;
3309                         break;
3310                 }
3311         }
3312 }
3313
3314 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3315                                  const struct intel_crtc_state *old_crtc_state)
3316 {
3317         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3318
3319         /* enable with pattern 1 (as per spec) */
3320
3321         intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3322
3323         /*
3324          * Magic for VLV/CHV. We _must_ first set up the register
3325          * without actually enabling the port, and then do another
3326          * write to enable the port. Otherwise link training will
3327          * fail when the power sequencer is freshly used for this port.
3328          */
3329         intel_dp->DP |= DP_PORT_EN;
3330         if (old_crtc_state->has_audio)
3331                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3332
3333         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3334         POSTING_READ(intel_dp->output_reg);
3335 }
3336
3337 static void intel_enable_dp(struct intel_encoder *encoder,
3338                             const struct intel_crtc_state *pipe_config,
3339                             const struct drm_connector_state *conn_state)
3340 {
3341         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3342         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3343         struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3344         u32 dp_reg = I915_READ(intel_dp->output_reg);
3345         enum pipe pipe = crtc->pipe;
3346         intel_wakeref_t wakeref;
3347
3348         if (WARN_ON(dp_reg & DP_PORT_EN))
3349                 return;
3350
3351         with_pps_lock(intel_dp, wakeref) {
3352                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3353                         vlv_init_panel_power_sequencer(encoder, pipe_config);
3354
3355                 intel_dp_enable_port(intel_dp, pipe_config);
3356
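                /*
                 * Force VDD across the panel power-up sequence, then
                 * release it synchronously once the panel can supply
                 * its own power.
                 */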
3357                 edp_panel_vdd_on(intel_dp);
3358                 edp_panel_on(intel_dp);
3359                 edp_panel_vdd_off(intel_dp, true);
3360         }
3361
3362         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3363                 unsigned int lane_mask = 0x0;
3364
3365                 if (IS_CHERRYVIEW(dev_priv))
3366                         lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3367
3368                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3369                                     lane_mask);
3370         }
3371
3372         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3373         intel_dp_start_link_train(intel_dp);
3374         intel_dp_stop_link_train(intel_dp);
3375
3376         if (pipe_config->has_audio) {
3377                 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3378                                  pipe_name(pipe));
3379                 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3380         }
3381 }
3382
3383 static void g4x_enable_dp(struct intel_encoder *encoder,
3384                           const struct intel_crtc_state *pipe_config,
3385                           const struct drm_connector_state *conn_state)
3386 {
3387         intel_enable_dp(encoder, pipe_config, conn_state);
3388         intel_edp_backlight_on(pipe_config, conn_state);
3389 }
3390
3391 static void vlv_enable_dp(struct intel_encoder *encoder,
3392                           const struct intel_crtc_state *pipe_config,
3393                           const struct drm_connector_state *conn_state)
3394 {
3395         intel_edp_backlight_on(pipe_config, conn_state);
3396 }
3397
3398 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3399                               const struct intel_crtc_state *pipe_config,
3400                               const struct drm_connector_state *conn_state)
3401 {
3402         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3403         enum port port = encoder->port;
3404
3405         intel_dp_prepare(encoder, pipe_config);
3406
3407         /* Only ilk+ has port A */
3408         if (port == PORT_A)
3409                 ironlake_edp_pll_on(intel_dp, pipe_config);
3410 }
3411
3412 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3413 {
3414         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3415         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3416         enum pipe pipe = intel_dp->pps_pipe;
3417         i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3418
3419         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3420
3421         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3422                 return;
3423
3424         edp_panel_vdd_off_sync(intel_dp);
3425
3426         /*
3427          * VLV seems to get confused when multiple power sequencers
3428          * have the same port selected (even if only one has power/vdd
3429          * enabled). The failure manifests as vlv_wait_port_ready() failing.
3430          * CHV, on the other hand, doesn't seem to mind having the same port
3431          * selected in multiple power sequencers, but let's always clear the
3432          * port select when logically disconnecting a power sequencer
3433          * from a port.
3434          */
3435         DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3436                       pipe_name(pipe), port_name(intel_dig_port->base.port));
3437         I915_WRITE(pp_on_reg, 0);
3438         POSTING_READ(pp_on_reg);
3439
3440         intel_dp->pps_pipe = INVALID_PIPE;
3441 }
3442
3443 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3444                                       enum pipe pipe)
3445 {
3446         struct intel_encoder *encoder;
3447
3448         lockdep_assert_held(&dev_priv->pps_mutex);
3449
3450         for_each_intel_dp(&dev_priv->drm, encoder) {
3451                 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3452                 enum port port = encoder->port;
3453
3454                 WARN(intel_dp->active_pipe == pipe,
3455                      "stealing pipe %c power sequencer from active (e)DP port %c\n",
3456                      pipe_name(pipe), port_name(port));
3457
3458                 if (intel_dp->pps_pipe != pipe)
3459                         continue;
3460
3461                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3462                               pipe_name(pipe), port_name(port));
3463
3464                 /* make sure vdd is off before we steal it */
3465                 vlv_detach_power_sequencer(intel_dp);
3466         }
3467 }
3468
3469 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3470                                            const struct intel_crtc_state *crtc_state)
3471 {
3472         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3473         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3474         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3475
3476         lockdep_assert_held(&dev_priv->pps_mutex);
3477
3478         WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3479
3480         if (intel_dp->pps_pipe != INVALID_PIPE &&
3481             intel_dp->pps_pipe != crtc->pipe) {
3482                 /*
3483                  * If another power sequencer was being used on this
3484                  * port previously make sure to turn off vdd there while
3485                  * we still have control of it.
3486                  */
3487                 vlv_detach_power_sequencer(intel_dp);
3488         }
3489
3490         /*
3491          * We may be stealing the power
3492          * sequencer from another port.
3493          */
3494         vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3495
3496         intel_dp->active_pipe = crtc->pipe;
3497
3498         if (!intel_dp_is_edp(intel_dp))
3499                 return;
3500
3501         /* now it's all ours */
3502         intel_dp->pps_pipe = crtc->pipe;
3503
3504         DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3505                       pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3506
3507         /* init power sequencer on this pipe and port */
3508         intel_dp_init_panel_power_sequencer(intel_dp);
3509         intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3510 }
3511
3512 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3513                               const struct intel_crtc_state *pipe_config,
3514                               const struct drm_connector_state *conn_state)
3515 {
3516         vlv_phy_pre_encoder_enable(encoder, pipe_config);
3517
3518         intel_enable_dp(encoder, pipe_config, conn_state);
3519 }
3520
3521 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3522                                   const struct intel_crtc_state *pipe_config,
3523                                   const struct drm_connector_state *conn_state)
3524 {
3525         intel_dp_prepare(encoder, pipe_config);
3526
3527         vlv_phy_pre_pll_enable(encoder, pipe_config);
3528 }
3529
3530 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3531                               const struct intel_crtc_state *pipe_config,
3532                               const struct drm_connector_state *conn_state)
3533 {
3534         chv_phy_pre_encoder_enable(encoder, pipe_config);
3535
3536         intel_enable_dp(encoder, pipe_config, conn_state);
3537
3538         /* Second common lane will stay alive on its own now */
3539         chv_phy_release_cl2_override(encoder);
3540 }
3541
3542 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3543                                   const struct intel_crtc_state *pipe_config,
3544                                   const struct drm_connector_state *conn_state)
3545 {
3546         intel_dp_prepare(encoder, pipe_config);
3547
3548         chv_phy_pre_pll_enable(encoder, pipe_config);
3549 }
3550
3551 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3552                                     const struct intel_crtc_state *old_crtc_state,
3553                                     const struct drm_connector_state *old_conn_state)
3554 {
3555         chv_phy_post_pll_disable(encoder, old_crtc_state);
3556 }
3557
3558 /*
3559  * Fetch AUX CH registers 0x202 - 0x207 which contain
3560  * link status information
3561  */
3562 bool
3563 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3564 {
3565         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3566                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3567 }
3568
3569 /* These are source-specific values. */
3570 u8
3571 intel_dp_voltage_max(struct intel_dp *intel_dp)
3572 {
3573         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3574         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3575         enum port port = encoder->port;
3576
3577         if (HAS_DDI(dev_priv))
3578                 return intel_ddi_dp_voltage_max(encoder);
3579         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3580                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3581         else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3582                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3583         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3584                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3585         else
3586                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3587 }
3588
3589 u8
3590 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3591 {
3592         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3593         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3594         enum port port = encoder->port;
3595
3596         if (HAS_DDI(dev_priv)) {
3597                 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3598         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3599                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3600                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3601                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3602                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3603                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3604                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3605                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3606                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3607                 default:
3608                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3609                 }
3610         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3611                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3612                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3613                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3614                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3615                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3616                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3617                 default:
3618                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3619                 }
3620         } else {
3621                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3622                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3623                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3624                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3625                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3626                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3627                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3628                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3629                 default:
3630                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3631                 }
3632         }
3633 }
3634
3635 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3636 {
3637         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3638         unsigned long demph_reg_value, preemph_reg_value,
3639                 uniqtranscale_reg_value;
3640         u8 train_set = intel_dp->train_set[0];
3641
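        /*
         * Each supported (vswing, pre-emphasis) combination maps to a fixed
         * triple of PHY tuning constants; unsupported combinations bail out
         * without touching the PHY.
         */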
3642         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3643         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3644                 preemph_reg_value = 0x0004000;
3645                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3646                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3647                         demph_reg_value = 0x2B405555;
3648                         uniqtranscale_reg_value = 0x552AB83A;
3649                         break;
3650                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3651                         demph_reg_value = 0x2B404040;
3652                         uniqtranscale_reg_value = 0x5548B83A;
3653                         break;
3654                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3655                         demph_reg_value = 0x2B245555;
3656                         uniqtranscale_reg_value = 0x5560B83A;
3657                         break;
3658                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3659                         demph_reg_value = 0x2B405555;
3660                         uniqtranscale_reg_value = 0x5598DA3A;
3661                         break;
3662                 default:
3663                         return 0;
3664                 }
3665                 break;
3666         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3667                 preemph_reg_value = 0x0002000;
3668                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3669                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3670                         demph_reg_value = 0x2B404040;
3671                         uniqtranscale_reg_value = 0x5552B83A;
3672                         break;
3673                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3674                         demph_reg_value = 0x2B404848;
3675                         uniqtranscale_reg_value = 0x5580B83A;
3676                         break;
3677                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3678                         demph_reg_value = 0x2B404040;
3679                         uniqtranscale_reg_value = 0x55ADDA3A;
3680                         break;
3681                 default:
3682                         return 0;
3683                 }
3684                 break;
3685         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3686                 preemph_reg_value = 0x0000000;
3687                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3688                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3689                         demph_reg_value = 0x2B305555;
3690                         uniqtranscale_reg_value = 0x5570B83A;
3691                         break;
3692                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3693                         demph_reg_value = 0x2B2B4040;
3694                         uniqtranscale_reg_value = 0x55ADDA3A;
3695                         break;
3696                 default:
3697                         return 0;
3698                 }
3699                 break;
3700         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3701                 preemph_reg_value = 0x0006000;
3702                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3703                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3704                         demph_reg_value = 0x1B405555;
3705                         uniqtranscale_reg_value = 0x55ADDA3A;
3706                         break;
3707                 default:
3708                         return 0;
3709                 }
3710                 break;
3711         default:
3712                 return 0;
3713         }
3714
3715         vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3716                                  uniqtranscale_reg_value, 0);
3717
3718         return 0;
3719 }
3720
3721 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3722 {
3723         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3724         u32 deemph_reg_value, margin_reg_value;
3725         bool uniq_trans_scale = false;
3726         u8 train_set = intel_dp->train_set[0];
3727
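        /*
         * As on VLV, each valid (vswing, pre-emphasis) combination maps to
         * fixed de-emphasis/margin constants; the unique transition scale
         * is only needed for the maximum-swing, zero-pre-emphasis case.
         */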
3728         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3729         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3730                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3731                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3732                         deemph_reg_value = 128;
3733                         margin_reg_value = 52;
3734                         break;
3735                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3736                         deemph_reg_value = 128;
3737                         margin_reg_value = 77;
3738                         break;
3739                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3740                         deemph_reg_value = 128;
3741                         margin_reg_value = 102;
3742                         break;
3743                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3744                         deemph_reg_value = 128;
3745                         margin_reg_value = 154;
3746                         uniq_trans_scale = true;
3747                         break;
3748                 default:
3749                         return 0;
3750                 }
3751                 break;
3752         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3753                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3754                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3755                         deemph_reg_value = 85;
3756                         margin_reg_value = 78;
3757                         break;
3758                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3759                         deemph_reg_value = 85;
3760                         margin_reg_value = 116;
3761                         break;
3762                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3763                         deemph_reg_value = 85;
3764                         margin_reg_value = 154;
3765                         break;
3766                 default:
3767                         return 0;
3768                 }
3769                 break;
3770         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3771                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3772                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3773                         deemph_reg_value = 64;
3774                         margin_reg_value = 104;
3775                         break;
3776                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3777                         deemph_reg_value = 64;
3778                         margin_reg_value = 154;
3779                         break;
3780                 default:
3781                         return 0;
3782                 }
3783                 break;
3784         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3785                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3786                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3787                         deemph_reg_value = 43;
3788                         margin_reg_value = 154;
3789                         break;
3790                 default:
3791                         return 0;
3792                 }
3793                 break;
3794         default:
3795                 return 0;
3796         }
3797
3798         chv_set_phy_signal_level(encoder, deemph_reg_value,
3799                                  margin_reg_value, uniq_trans_scale);
3800
3801         return 0;
3802 }
3803
3804 static u32
3805 g4x_signal_levels(u8 train_set)
3806 {
3807         u32 signal_levels = 0;
3808
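        /* Voltage swing and pre-emphasis are independent bitfields on g4x. */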
3809         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3810         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3811         default:
3812                 signal_levels |= DP_VOLTAGE_0_4;
3813                 break;
3814         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3815                 signal_levels |= DP_VOLTAGE_0_6;
3816                 break;
3817         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3818                 signal_levels |= DP_VOLTAGE_0_8;
3819                 break;
3820         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3821                 signal_levels |= DP_VOLTAGE_1_2;
3822                 break;
3823         }
3824         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3825         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3826         default:
3827                 signal_levels |= DP_PRE_EMPHASIS_0;
3828                 break;
3829         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3830                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3831                 break;
3832         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3833                 signal_levels |= DP_PRE_EMPHASIS_6;
3834                 break;
3835         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3836                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3837                 break;
3838         }
3839         return signal_levels;
3840 }
3841
3842 /* SNB CPU eDP voltage swing and pre-emphasis control */
3843 static u32
3844 snb_cpu_edp_signal_levels(u8 train_set)
3845 {
3846         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3847                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3848         switch (signal_levels) {
3849         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3850         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3851                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3852         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3853                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3854         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3855         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3856                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3857         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3858         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3859                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3860         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3861         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3862                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3863         default:
3864                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3865                               signal_levels);
3866                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3867         }
3868 }
3869
3870 /* IVB CPU eDP voltage swing and pre-emphasis control */
3871 static u32
3872 ivb_cpu_edp_signal_levels(u8 train_set)
3873 {
3874         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3875                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3876         switch (signal_levels) {
3877         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3878                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3879         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3880                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3881         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3882                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3883
3884         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3885                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3886         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3887                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3888
3889         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3890                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3891         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3892                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3893
3894         default:
3895                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
3896                               signal_levels);
3897                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3898         }
3899 }
3900
3901 void
3902 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3903 {
3904         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3905         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3906         enum port port = intel_dig_port->base.port;
3907         u32 signal_levels, mask = 0;
3908         u8 train_set = intel_dp->train_set[0];
3909
3910         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3911                 signal_levels = bxt_signal_levels(intel_dp);
3912         } else if (HAS_DDI(dev_priv)) {
3913                 signal_levels = ddi_signal_levels(intel_dp);
3914                 mask = DDI_BUF_EMP_MASK;
3915         } else if (IS_CHERRYVIEW(dev_priv)) {
3916                 signal_levels = chv_signal_levels(intel_dp);
3917         } else if (IS_VALLEYVIEW(dev_priv)) {
3918                 signal_levels = vlv_signal_levels(intel_dp);
3919         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3920                 signal_levels = ivb_cpu_edp_signal_levels(train_set);
3921                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3922         } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
3923                 signal_levels = snb_cpu_edp_signal_levels(train_set);
3924                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3925         } else {
3926                 signal_levels = g4x_signal_levels(train_set);
3927                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3928         }
3929
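        /*
         * A zero mask means the platform hook above programmed its PHY
         * registers directly, leaving nothing to merge into the port
         * register here.
         */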
3930         if (mask)
3931                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3932
3933         DRM_DEBUG_KMS("Using vswing level %d\n",
3934                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3935         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3936                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3937                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3938
3939         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3940
3941         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3942         POSTING_READ(intel_dp->output_reg);
3943 }
3944
3945 void
3946 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3947                                        u8 dp_train_pat)
3948 {
3949         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3950         struct drm_i915_private *dev_priv =
3951                 to_i915(intel_dig_port->base.base.dev);
3952
3953         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3954
3955         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3956         POSTING_READ(intel_dp->output_reg);
3957 }
3958
3959 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3960 {
3961         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3962         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3963         enum port port = intel_dig_port->base.port;
3964         u32 val;
3965
3966         if (!HAS_DDI(dev_priv))
3967                 return;
3968
3969         val = I915_READ(DP_TP_CTL(port));
3970         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3971         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3972         I915_WRITE(DP_TP_CTL(port), val);
3973
3974         /*
3975          * On PORT_A we can have only eDP in SST mode. There, the only reason
3976          * we need to set idle transmission mode is to work around a HW issue
3977          * where we enable the pipe while not in idle link-training mode.
3978          * In this case there is a requirement to wait for a minimum number of
3979          * idle patterns to be sent.
3980          */
3981         if (port == PORT_A)
3982                 return;
3983
3984         if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
3985                                     DP_TP_STATUS_IDLE_DONE,
3986                                     DP_TP_STATUS_IDLE_DONE,
3987                                     1))
3988                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3989 }
3990
3991 static void
3992 intel_dp_link_down(struct intel_encoder *encoder,
3993                    const struct intel_crtc_state *old_crtc_state)
3994 {
3995         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3996         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3997         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3998         enum port port = encoder->port;
3999         u32 DP = intel_dp->DP;
4000
4001         if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4002                 return;
4003
4004         DRM_DEBUG_KMS("\n");
4005
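        /* Put the link into the idle pattern before turning the port off. */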
4006         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4007             (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4008                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4009                 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4010         } else {
4011                 DP &= ~DP_LINK_TRAIN_MASK;
4012                 DP |= DP_LINK_TRAIN_PAT_IDLE;
4013         }
4014         I915_WRITE(intel_dp->output_reg, DP);
4015         POSTING_READ(intel_dp->output_reg);
4016
4017         DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4018         I915_WRITE(intel_dp->output_reg, DP);
4019         POSTING_READ(intel_dp->output_reg);
4020
4021         /*
4022          * HW workaround for IBX, we need to move the port
4023          * to transcoder A after disabling it to allow the
4024          * matching HDMI port to be enabled on transcoder A.
4025          */
4026         if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4027                 /*
4028                  * We get CPU/PCH FIFO underruns on the other pipe when
4029                  * doing the workaround. Sweep them under the rug.
4030                  */
4031                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4032                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4033
4034                 /* always enable with pattern 1 (as per spec) */
4035                 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4036                 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4037                         DP_LINK_TRAIN_PAT_1;
4038                 I915_WRITE(intel_dp->output_reg, DP);
4039                 POSTING_READ(intel_dp->output_reg);
4040
4041                 DP &= ~DP_PORT_EN;
4042                 I915_WRITE(intel_dp->output_reg, DP);
4043                 POSTING_READ(intel_dp->output_reg);
4044
4045                 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4046                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4047                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4048         }
4049
4050         msleep(intel_dp->panel_power_down_delay);
4051
4052         intel_dp->DP = DP;
4053
4054         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4055                 intel_wakeref_t wakeref;
4056
4057                 with_pps_lock(intel_dp, wakeref)
4058                         intel_dp->active_pipe = INVALID_PIPE;
4059         }
4060 }
4061
4062 static void
4063 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4064 {
4065         u8 dpcd_ext[6];
4066
4067         /*
4068          * Prior to DP 1.3 the bit represented by
4069          * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4070          * If it is set, DP_DPCD_REV at 0000h could report a value less than
4071          * the true capability of the panel. The only way to check is to
4072          * compare the values at 0000h and 2200h.
4073          */
4074         if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4075               DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4076                 return;
4077
4078         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4079                              &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4080                 DRM_ERROR("Failed to read extended DPCD capabilities\n");
4081                 return;
4082         }
4083
4084         if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4085                 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4086                 return;
4087         }
4088
4089         if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4090                 return;
4091
4092         DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4093                       (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4094
4095         memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4096 }
4097
4098 bool
4099 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4100 {
4101         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4102                              sizeof(intel_dp->dpcd)) < 0)
4103                 return false; /* aux transfer failed */
4104
4105         intel_dp_extended_receiver_capabilities(intel_dp);
4106
4107         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4108
4109         return intel_dp->dpcd[DP_DPCD_REV] != 0;
4110 }
4111
4112 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4113 {
4114         u8 dprx = 0;
4115
4116         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4117                               &dprx) != 1)
4118                 return false;
4119         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4120 }
4121
4122 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4123 {
4124         /*
4125          * Clear the cached register set to avoid using stale values
4126          * for the sinks that do not support DSC.
4127          */
4128         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4129
4130         /* Clear fec_capable to avoid using stale values */
4131         intel_dp->fec_capable = 0;
4132
4133         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4134         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4135             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4136                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4137                                      intel_dp->dsc_dpcd,
4138                                      sizeof(intel_dp->dsc_dpcd)) < 0)
4139                         DRM_ERROR("Failed to read DPCD register 0x%x\n",
4140                                   DP_DSC_SUPPORT);
4141
4142                 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4143                               (int)sizeof(intel_dp->dsc_dpcd),
4144                               intel_dp->dsc_dpcd);
4145
4146                 /* FEC is supported only on DP 1.4 */
4147                 if (!intel_dp_is_edp(intel_dp) &&
4148                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4149                                       &intel_dp->fec_capable) < 0)
4150                         DRM_ERROR("Failed to read FEC DPCD register\n");
4151
4152                 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4153         }
4154 }
4155
4156 static bool
4157 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4158 {
4159         struct drm_i915_private *dev_priv =
4160                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4161
4162         /* this function is meant to be called only once */
4163         WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4164
4165         if (!intel_dp_read_dpcd(intel_dp))
4166                 return false;
4167
4168         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4169                          drm_dp_is_branch(intel_dp->dpcd));
4170
4171         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4172                 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4173                         DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4174
4175         /*
4176          * Read the eDP display control registers.
4177          *
4178          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4179          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4180          * set, but require eDP 1.4+ detection (e.g. for supported link rates
4181          * method). The display control registers should read zero if they're
4182          * not supported anyway.
4183          */
4184         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4185                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4186                              sizeof(intel_dp->edp_dpcd))
4187                 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4188                               intel_dp->edp_dpcd);
4189
	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, since
	 * PSR checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
	 */
4194         intel_psr_init_dpcd(intel_dp);
4195
4196         /* Read the eDP 1.4+ supported link rates. */
4197         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4198                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4199                 int i;
4200
4201                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4202                                 sink_rates, sizeof(sink_rates));
4203
4204                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4205                         int val = le16_to_cpu(sink_rates[i]);
4206
4207                         if (val == 0)
4208                                 break;
4209
			/*
			 * The value read, multiplied by 200 kHz, gives the
			 * per-lane link rate in kHz. The source rates are,
			 * however, stored in terms of LS_Clk kHz. The full
			 * conversion back to symbols is:
			 * (val * 200 kHz) * (8/10 ch. encoding) * (1/8 bit to byte)
			 */
4216                         intel_dp->sink_rates[i] = (val * 200) / 10;
4217                 }
4218                 intel_dp->num_sink_rates = i;
4219         }
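
	/*
	 * Worked example for the conversion above: a DP_SUPPORTED_LINK_RATES
	 * entry of 8100 (units of 200 kHz) is 8100 * 200 kHz = 1.62 Gbps per
	 * lane, stored as (8100 * 200) / 10 = 162000 LS_Clk kHz; likewise
	 * an entry of 27000 becomes 540000 (5.4 Gbps, HBR2).
	 */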
4220
4221         /*
4222          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4223          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4224          */
4225         if (intel_dp->num_sink_rates)
4226                 intel_dp->use_rate_select = true;
4227         else
4228                 intel_dp_set_sink_rates(intel_dp);
4229
4230         intel_dp_set_common_rates(intel_dp);
4231
4232         /* Read the eDP DSC DPCD registers */
4233         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4234                 intel_dp_get_dsc_sink_cap(intel_dp);
4235
4236         return true;
4237 }
4238
4239
4240 static bool
4241 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4242 {
4243         if (!intel_dp_read_dpcd(intel_dp))
4244                 return false;
4245
4246         /* Don't clobber cached eDP rates. */
4247         if (!intel_dp_is_edp(intel_dp)) {
4248                 intel_dp_set_sink_rates(intel_dp);
4249                 intel_dp_set_common_rates(intel_dp);
4250         }
4251
	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why we don't bother reading it here or in
	 * intel_edp_init_dpcd().
	 */
4256         if (!intel_dp_is_edp(intel_dp)) {
4257                 u8 count;
4258                 ssize_t r;
4259
4260                 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4261                 if (r < 1)
4262                         return false;
4263
		/*
		 * Sink count can change between short pulse HPD interrupts,
		 * hence a member variable in intel_dp tracks any changes
		 * between them.
		 */
4269                 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
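		/*
		 * As a worked example, DP_GET_SINK_COUNT() (drm_dp_helper.h)
		 * folds bit 7 of SINK_COUNT in as bit 6, so a raw value of
		 * 0x81 yields a sink count of 65, while the common 0x01
		 * yields 1.
		 */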
4270
		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we need to know
		 * whether a dongle is present, we don't need to update the
		 * downstream port information, so an early return here saves
		 * time by skipping operations that are not required.
		 */
4278                 if (!intel_dp->sink_count)
4279                         return false;
4280         }
4281
4282         if (!drm_dp_is_branch(intel_dp->dpcd))
4283                 return true; /* native DP sink */
4284
4285         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4286                 return true; /* no per-port downstream info */
4287
4288         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4289                              intel_dp->downstream_ports,
4290                              DP_MAX_DOWNSTREAM_PORTS) < 0)
4291                 return false; /* downstream port status fetch failed */
4292
4293         return true;
4294 }
4295
4296 static bool
4297 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4298 {
4299         u8 mstm_cap;
4300
4301         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4302                 return false;
4303
4304         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4305                 return false;
4306
4307         return mstm_cap & DP_MST_CAP;
4308 }
4309
4310 static bool
4311 intel_dp_can_mst(struct intel_dp *intel_dp)
4312 {
4313         return i915_modparams.enable_dp_mst &&
4314                 intel_dp->can_mst &&
4315                 intel_dp_sink_can_mst(intel_dp);
4316 }
4317
4318 static void
4319 intel_dp_configure_mst(struct intel_dp *intel_dp)
4320 {
4321         struct intel_encoder *encoder =
4322                 &dp_to_dig_port(intel_dp)->base;
4323         bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4324
4325         DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4326                       port_name(encoder->port), yesno(intel_dp->can_mst),
4327                       yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4328
4329         if (!intel_dp->can_mst)
4330                 return;
4331
4332         intel_dp->is_mst = sink_can_mst &&
4333                 i915_modparams.enable_dp_mst;
4334
4335         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4336                                         intel_dp->is_mst);
4337 }
4338
4339 static bool
4340 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4341 {
4342         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4343                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4344                 DP_DPRX_ESI_LEN;
4345 }
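
/*
 * The 14-byte ESI read above starts at DP_SINK_COUNT_ESI (DPCD 2002h), so
 * the buffer spans DPCD 2002h-200Fh; esi[10] in the MST handler below
 * therefore corresponds to the lane status field at 200Ch.
 */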
4346
4347 u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4348                                 int mode_clock, int mode_hdisplay)
4349 {
4350         u16 bits_per_pixel, max_bpp_small_joiner_ram;
4351         int i;
4352
	/*
	 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
	 * (LinkSymbolClock) * 8 * ((100 - FECOverhead) / 100) * (TimeSlotsPerMTP)
	 * FECOverhead = 2.4%; for SST TimeSlotsPerMTP is 1,
	 * for MST TimeSlotsPerMTP has to be calculated.
	 */
4359         bits_per_pixel = (link_clock * lane_count * 8 *
4360                           DP_DSC_FEC_OVERHEAD_FACTOR) /
4361                 mode_clock;
4362
4363         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4364         max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4365                 mode_hdisplay;
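
	/*
	 * Worked example: the small joiner RAM is 61440 bits (2 FIFOs of
	 * 640 x 6 bytes), so a 3840 pixel wide mode is limited to
	 * 61440 / 3840 = 16 bpp of compressed output.
	 */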
4366
	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
4371         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4372
4373         /* Error out if the max bpp is less than smallest allowed valid bpp */
4374         if (bits_per_pixel < valid_dsc_bpp[0]) {
4375                 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4376                 return 0;
4377         }
4378
4379         /* Find the nearest match in the array of known BPPs from VESA */
4380         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4381                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4382                         break;
4383         }
4384         bits_per_pixel = valid_dsc_bpp[i];
4385
	/*
	 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
	 * the fractional part is 0.
	 */
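	/* E.g. a final 12 bpp is returned as 12 << 4 = 192 (12.0 in U6.4). */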
4390         return bits_per_pixel << 4;
4391 }
4392
4393 u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4394                                 int mode_clock,
4395                                 int mode_hdisplay)
4396 {
4397         u8 min_slice_count, i;
4398         int max_slice_width;
4399
4400         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4401                 min_slice_count = DIV_ROUND_UP(mode_clock,
4402                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
4403         else
4404                 min_slice_count = DIV_ROUND_UP(mode_clock,
4405                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
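
	/*
	 * Worked example: a 4k60 mode clock of 533250 kPixels/s is below the
	 * 2720000 peak rate, so min_slice_count becomes
	 * DIV_ROUND_UP(533250, 340000) = 2 slices to stay within the
	 * per-slice encoder throughput.
	 */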
4406
4407         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4408         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4409                 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4410                               max_slice_width);
4411                 return 0;
4412         }
4413         /* Also take into account max slice width */
4414         min_slice_count = min_t(u8, min_slice_count,
4415                                 DIV_ROUND_UP(mode_hdisplay,
4416                                              max_slice_width));
4417
4418         /* Find the closest match to the valid slice count values */
4419         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4420                 if (valid_dsc_slicecount[i] >
4421                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4422                                                     false))
4423                         break;
		if (min_slice_count <= valid_dsc_slicecount[i])
4425                         return valid_dsc_slicecount[i];
4426         }
4427
4428         DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4429         return 0;
4430 }
4431
4432 static void
4433 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4434                                const struct intel_crtc_state *crtc_state)
4435 {
4436         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4437         struct dp_sdp vsc_sdp = {};
4438
4439         /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4440         vsc_sdp.sdp_header.HB0 = 0;
4441         vsc_sdp.sdp_header.HB1 = 0x7;
4442
4443         /*
4444          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4445          * Colorimetry Format indication.
4446          */
4447         vsc_sdp.sdp_header.HB2 = 0x5;
4448
	/*
	 * With HB2 = 05h, HB3[4:0] gives the number of valid data bytes in
	 * the VSC SDP payload: 0x13 = 19 bytes, DB0 through DB18.
	 */
	vsc_sdp.sdp_header.HB3 = 0x13;
4454
	/*
	 * DB16[7:4]: pixel encoding, YCbCr 4:2:0 = 3h.
	 * DB16[3:0]: colorimetry format, ITU-R BT.601 = 0h, BT.709 = 1h.
	 * See DP 1.4a spec, Table 2-120.
	 */
	vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 4:2:0 */
	/* RGB->YCbCr color conversion uses the BT.709 color space. */
	vsc_sdp.db[16] |= 0x1; /* ITU-R BT.709 */
4462
4463         /*
4464          * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4465          * the following Component Bit Depth values are defined:
4466          * 001b = 8bpc.
4467          * 010b = 10bpc.
4468          * 011b = 12bpc.
4469          * 100b = 16bpc.
4470          */
4471         switch (crtc_state->pipe_bpp) {
4472         case 24: /* 8bpc */
4473                 vsc_sdp.db[17] = 0x1;
4474                 break;
4475         case 30: /* 10bpc */
4476                 vsc_sdp.db[17] = 0x2;
4477                 break;
4478         case 36: /* 12bpc */
4479                 vsc_sdp.db[17] = 0x3;
4480                 break;
4481         case 48: /* 16bpc */
4482                 vsc_sdp.db[17] = 0x4;
4483                 break;
4484         default:
4485                 MISSING_CASE(crtc_state->pipe_bpp);
4486                 break;
4487         }
4488
	/*
	 * Dynamic Range (Bit 7):
	 * 0 = VESA range, 1 = CTA range.
	 * All YCbCr formats are always limited range.
	 */
4494         vsc_sdp.db[17] |= 0x80;
4495
	/*
	 * Content Type (Bits 2:0):
	 * 000b = Not defined.
	 * 001b = Graphics.
	 * 010b = Photo.
	 * 011b = Video.
	 * 100b = Game.
	 * All other values are RESERVED.
	 * Note: See CTA-861-G for the definition and expected
	 * processing by a stream sink for the above content types.
	 */
4507         vsc_sdp.db[18] = 0;
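
	/*
	 * For the common 8 bpc, YCbCr 4:2:0, BT.709 case the packet built
	 * above is therefore HB0-HB3 = 00h 07h 05h 13h, DB16 = 31h,
	 * DB17 = 81h (8 bpc | CTA range) and DB18 = 00h.
	 */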
4508
4509         intel_dig_port->write_infoframe(&intel_dig_port->base,
4510                         crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4511 }
4512
4513 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4514                                const struct intel_crtc_state *crtc_state)
4515 {
4516         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4517                 return;
4518
4519         intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
4520 }
4521
4522 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4523 {
4524         int status = 0;
4525         int test_link_rate;
4526         u8 test_lane_count, test_link_bw;
	/* DP CTS 1.2, section 4.3.1.11 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4531         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4532                                    &test_lane_count);
4533
4534         if (status <= 0) {
4535                 DRM_DEBUG_KMS("Lane count read failed\n");
4536                 return DP_TEST_NAK;
4537         }
4538         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4539
4540         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4541                                    &test_link_bw);
4542         if (status <= 0) {
4543                 DRM_DEBUG_KMS("Link Rate read failed\n");
4544                 return DP_TEST_NAK;
4545         }
4546         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
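
	/*
	 * drm_dp_bw_code_to_link_rate() multiplies the BW code by 27000, so
	 * e.g. a TEST_LINK_RATE of 0x14 maps to 540000, the kernel's
	 * representation of 5.4 Gbps (HBR2).
	 */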
4547
4548         /* Validate the requested link rate and lane count */
4549         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4550                                         test_lane_count))
4551                 return DP_TEST_NAK;
4552
4553         intel_dp->compliance.test_lane_count = test_lane_count;
4554         intel_dp->compliance.test_link_rate = test_link_rate;
4555
4556         return DP_TEST_ACK;
4557 }
4558
4559 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4560 {
4561         u8 test_pattern;
4562         u8 test_misc;
4563         __be16 h_width, v_height;
4564         int status = 0;
4565
4566         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4567         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4568                                    &test_pattern);
4569         if (status <= 0) {
4570                 DRM_DEBUG_KMS("Test pattern read failed\n");
4571                 return DP_TEST_NAK;
4572         }
4573         if (test_pattern != DP_COLOR_RAMP)
4574                 return DP_TEST_NAK;
4575
4576         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4577                                   &h_width, 2);
4578         if (status <= 0) {
4579                 DRM_DEBUG_KMS("H Width read failed\n");
4580                 return DP_TEST_NAK;
4581         }
4582
4583         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4584                                   &v_height, 2);
4585         if (status <= 0) {
4586                 DRM_DEBUG_KMS("V Height read failed\n");
4587                 return DP_TEST_NAK;
4588         }
4589
4590         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4591                                    &test_misc);
4592         if (status <= 0) {
4593                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4594                 return DP_TEST_NAK;
4595         }
4596         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4597                 return DP_TEST_NAK;
4598         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4599                 return DP_TEST_NAK;
4600         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4601         case DP_TEST_BIT_DEPTH_6:
4602                 intel_dp->compliance.test_data.bpc = 6;
4603                 break;
4604         case DP_TEST_BIT_DEPTH_8:
4605                 intel_dp->compliance.test_data.bpc = 8;
4606                 break;
4607         default:
4608                 return DP_TEST_NAK;
4609         }
4610
4611         intel_dp->compliance.test_data.video_pattern = test_pattern;
4612         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4613         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
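
	/*
	 * The width/height registers are big-endian, hence the __be16
	 * buffers and be16_to_cpu() above: raw bytes 0Fh 00h decode to
	 * 0x0f00 = 3840.
	 */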
4614         /* Set test active flag here so userspace doesn't interrupt things */
4615         intel_dp->compliance.test_active = 1;
4616
4617         return DP_TEST_ACK;
4618 }
4619
4620 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4621 {
4622         u8 test_result = DP_TEST_ACK;
4623         struct intel_connector *intel_connector = intel_dp->attached_connector;
4624         struct drm_connector *connector = &intel_connector->base;
4625
4626         if (intel_connector->detect_edid == NULL ||
4627             connector->edid_corrupt ||
4628             intel_dp->aux.i2c_defer_count > 6) {
4629                 /* Check EDID read for NACKs, DEFERs and corruption
4630                  * (DP CTS 1.2 Core r1.1)
4631                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4632                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4633                  *    4.2.2.6 : EDID corruption detected
4634                  * Use failsafe mode for all cases
4635                  */
4636                 if (intel_dp->aux.i2c_nack_count > 0 ||
4637                         intel_dp->aux.i2c_defer_count > 0)
4638                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4639                                       intel_dp->aux.i2c_nack_count,
4640                                       intel_dp->aux.i2c_defer_count);
4641                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4642         } else {
4643                 struct edid *block = intel_connector->detect_edid;
4644
4645                 /* We have to write the checksum
4646                  * of the last block read
4647                  */
4648                 block += intel_connector->detect_edid->extensions;
4649
4650                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4651                                        block->checksum) <= 0)
4652                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4653
4654                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4655                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4656         }
4657
4658         /* Set test active flag here so userspace doesn't interrupt things */
4659         intel_dp->compliance.test_active = 1;
4660
4661         return test_result;
4662 }
4663
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	/* PHY test pattern handling is not implemented yet; always NAK */
	return DP_TEST_NAK;
}
4669
4670 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4671 {
4672         u8 response = DP_TEST_NAK;
4673         u8 request = 0;
4674         int status;
4675
4676         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4677         if (status <= 0) {
4678                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4679                 goto update_status;
4680         }
4681
4682         switch (request) {
4683         case DP_TEST_LINK_TRAINING:
4684                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4685                 response = intel_dp_autotest_link_training(intel_dp);
4686                 break;
4687         case DP_TEST_LINK_VIDEO_PATTERN:
4688                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4689                 response = intel_dp_autotest_video_pattern(intel_dp);
4690                 break;
4691         case DP_TEST_LINK_EDID_READ:
4692                 DRM_DEBUG_KMS("EDID test requested\n");
4693                 response = intel_dp_autotest_edid(intel_dp);
4694                 break;
4695         case DP_TEST_LINK_PHY_TEST_PATTERN:
4696                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4697                 response = intel_dp_autotest_phy_pattern(intel_dp);
4698                 break;
4699         default:
4700                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4701                 break;
4702         }
4703
4704         if (response & DP_TEST_ACK)
4705                 intel_dp->compliance.test_type = request;
4706
4707 update_status:
4708         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4709         if (status <= 0)
4710                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4711 }
4712
4713 static int
4714 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4715 {
4716         bool bret;
4717
4718         if (intel_dp->is_mst) {
4719                 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4720                 int ret = 0;
4721                 int retry;
4722                 bool handled;
4723
4724                 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4725                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4726 go_again:
		if (bret) {
			/* check link status - esi[10] maps to DPCD 0x200c */
4730                         if (intel_dp->active_mst_links > 0 &&
4731                             !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4732                                 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4733                                 intel_dp_start_link_train(intel_dp);
4734                                 intel_dp_stop_link_train(intel_dp);
4735                         }
4736
4737                         DRM_DEBUG_KMS("got esi %3ph\n", esi);
4738                         ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4739
4740                         if (handled) {
4741                                 for (retry = 0; retry < 3; retry++) {
					int wret;

					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI + 1,
								 &esi[1], 3);
					if (wret == 3)
						break;
4749                                 }
4750
4751                                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret) {
4753                                         DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4754                                         goto go_again;
4755                                 }
4756                         } else
4757                                 ret = 0;
4758
4759                         return ret;
4760                 } else {
4761                         DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4762                         intel_dp->is_mst = false;
4763                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4764                                                         intel_dp->is_mst);
4765                 }
4766         }
4767         return -EINVAL;
4768 }
4769
4770 static bool
4771 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4772 {
4773         u8 link_status[DP_LINK_STATUS_SIZE];
4774
4775         if (!intel_dp->link_trained)
4776                 return false;
4777
	/*
	 * While PSR source HW is enabled it controls the main-link, enabling
	 * and disabling the sending of frames, so attempting a retrain can
	 * fail: the link may not be on, or training patterns could get mixed
	 * with frame data, either of which makes the retrain fail.
	 * Also, when exiting PSR the HW retrains the link anyway, fixing any
	 * link status error.
	 */
4786         if (intel_psr_enabled(intel_dp))
4787                 return false;
4788
4789         if (!intel_dp_get_link_status(intel_dp, link_status))
4790                 return false;
4791
4792         /*
4793          * Validate the cached values of intel_dp->link_rate and
4794          * intel_dp->lane_count before attempting to retrain.
4795          */
4796         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4797                                         intel_dp->lane_count))
4798                 return false;
4799
4800         /* Retrain if Channel EQ or CR not ok */
4801         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4802 }
4803
4804 int intel_dp_retrain_link(struct intel_encoder *encoder,
4805                           struct drm_modeset_acquire_ctx *ctx)
4806 {
4807         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4808         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4809         struct intel_connector *connector = intel_dp->attached_connector;
4810         struct drm_connector_state *conn_state;
4811         struct intel_crtc_state *crtc_state;
4812         struct intel_crtc *crtc;
4813         int ret;
4814
4815         /* FIXME handle the MST connectors as well */
4816
4817         if (!connector || connector->base.status != connector_status_connected)
4818                 return 0;
4819
4820         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4821                                ctx);
4822         if (ret)
4823                 return ret;
4824
4825         conn_state = connector->base.state;
4826
4827         crtc = to_intel_crtc(conn_state->crtc);
4828         if (!crtc)
4829                 return 0;
4830
4831         ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4832         if (ret)
4833                 return ret;
4834
4835         crtc_state = to_intel_crtc_state(crtc->base.state);
4836
4837         WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4838
4839         if (!crtc_state->base.active)
4840                 return 0;
4841
4842         if (conn_state->commit &&
4843             !try_wait_for_completion(&conn_state->commit->hw_done))
4844                 return 0;
4845
4846         if (!intel_dp_needs_link_retrain(intel_dp))
4847                 return 0;
4848
4849         /* Suppress underruns caused by re-training */
4850         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4851         if (crtc_state->has_pch_encoder)
4852                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4853                                                       intel_crtc_pch_transcoder(crtc), false);
4854
4855         intel_dp_start_link_train(intel_dp);
4856         intel_dp_stop_link_train(intel_dp);
4857
4858         /* Keep underrun reporting disabled until things are stable */
4859         intel_wait_for_vblank(dev_priv, crtc->pipe);
4860
4861         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4862         if (crtc_state->has_pch_encoder)
4863                 intel_set_pch_fifo_underrun_reporting(dev_priv,
4864                                                       intel_crtc_pch_transcoder(crtc), true);
4865
4866         return 0;
4867 }
4868
4869 /*
 * If the display is now connected, check the link status; there have
 * been known issues of link loss triggering a long pulse.
4873  *
4874  * Some sinks (eg. ASUS PB287Q) seem to perform some
4875  * weird HPD ping pong during modesets. So we can apparently
4876  * end up with HPD going low during a modeset, and then
4877  * going back up soon after. And once that happens we must
4878  * retrain the link to get a picture. That's in case no
4879  * userspace component reacted to intermittent HPD dip.
4880  */
4881 static bool intel_dp_hotplug(struct intel_encoder *encoder,
4882                              struct intel_connector *connector)
4883 {
4884         struct drm_modeset_acquire_ctx ctx;
4885         bool changed;
4886         int ret;
4887
4888         changed = intel_encoder_hotplug(encoder, connector);
4889
4890         drm_modeset_acquire_init(&ctx, 0);
4891
4892         for (;;) {
4893                 ret = intel_dp_retrain_link(encoder, &ctx);
4894
4895                 if (ret == -EDEADLK) {
4896                         drm_modeset_backoff(&ctx);
4897                         continue;
4898                 }
4899
4900                 break;
4901         }
4902
4903         drm_modeset_drop_locks(&ctx);
4904         drm_modeset_acquire_fini(&ctx);
4905         WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4906
4907         return changed;
4908 }
4909
4910 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4911 {
4912         u8 val;
4913
4914         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4915                 return;
4916
4917         if (drm_dp_dpcd_readb(&intel_dp->aux,
4918                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4919                 return;
4920
4921         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4922
4923         if (val & DP_AUTOMATED_TEST_REQUEST)
4924                 intel_dp_handle_test_request(intel_dp);
4925
4926         if (val & DP_CP_IRQ)
4927                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4928
4929         if (val & DP_SINK_SPECIFIC_IRQ)
4930                 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4931 }
4932
4933 /*
4934  * According to DP spec
4935  * 5.1.2:
4936  *  1. Read DPCD
4937  *  2. Configure link according to Receiver Capabilities
4938  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4939  *  4. Check link status on receipt of hot-plug interrupt
4940  *
4941  * intel_dp_short_pulse -  handles short pulse interrupts
4942  * when full detection is not required.
4943  * Returns %true if short pulse is handled and full detection
4944  * is NOT required and %false otherwise.
4945  */
4946 static bool
4947 intel_dp_short_pulse(struct intel_dp *intel_dp)
4948 {
4949         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4950         u8 old_sink_count = intel_dp->sink_count;
4951         bool ret;
4952
	/*
	 * Clear the compliance test variables to allow capturing
	 * values for the next automated test request.
	 */
4957         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4958
	/*
	 * Now read the DPCD to see if it's actually running. If the current
	 * value of the sink count doesn't match the value stored earlier, or
	 * the DPCD read failed, we need to do full detection.
	 */
4965         ret = intel_dp_get_dpcd(intel_dp);
4966
4967         if ((old_sink_count != intel_dp->sink_count) || !ret) {
4968                 /* No need to proceed if we are going to do full detect */
4969                 return false;
4970         }
4971
4972         intel_dp_check_service_irq(intel_dp);
4973
4974         /* Handle CEC interrupts, if any */
4975         drm_dp_cec_irq(&intel_dp->aux);
4976
4977         /* defer to the hotplug work for link retraining if needed */
4978         if (intel_dp_needs_link_retrain(intel_dp))
4979                 return false;
4980
4981         intel_psr_short_pulse(intel_dp);
4982
4983         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4984                 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4985                 /* Send a Hotplug Uevent to userspace to start modeset */
4986                 drm_kms_helper_hotplug_event(&dev_priv->drm);
4987         }
4988
4989         return true;
4990 }
4991
4992 /* XXX this is probably wrong for multiple downstream ports */
4993 static enum drm_connector_status
4994 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4995 {
4996         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4997         u8 *dpcd = intel_dp->dpcd;
4998         u8 type;
4999
5000         if (WARN_ON(intel_dp_is_edp(intel_dp)))
5001                 return connector_status_connected;
5002
5003         if (lspcon->active)
5004                 lspcon_resume(lspcon);
5005
5006         if (!intel_dp_get_dpcd(intel_dp))
5007                 return connector_status_disconnected;
5008
5009         /* if there's no downstream port, we're done */
5010         if (!drm_dp_is_branch(dpcd))
5011                 return connector_status_connected;
5012
5013         /* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected :
			connector_status_disconnected;
	}
5020
5021         if (intel_dp_can_mst(intel_dp))
5022                 return connector_status_connected;
5023
5024         /* If no HPD, poke DDC gently */
5025         if (drm_probe_ddc(&intel_dp->aux.ddc))
5026                 return connector_status_connected;
5027
5028         /* Well we tried, say unknown for unreliable port types */
5029         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5030                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5031                 if (type == DP_DS_PORT_TYPE_VGA ||
5032                     type == DP_DS_PORT_TYPE_NON_EDID)
5033                         return connector_status_unknown;
5034         } else {
5035                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5036                         DP_DWN_STRM_PORT_TYPE_MASK;
5037                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5038                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5039                         return connector_status_unknown;
5040         }
5041
5042         /* Anything else is out of spec, warn and ignore */
5043         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5044         return connector_status_disconnected;
5045 }
5046
5047 static enum drm_connector_status
5048 edp_detect(struct intel_dp *intel_dp)
5049 {
5050         return connector_status_connected;
5051 }
5052
5053 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5054 {
5055         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5056         u32 bit;
5057
5058         switch (encoder->hpd_pin) {
5059         case HPD_PORT_B:
5060                 bit = SDE_PORTB_HOTPLUG;
5061                 break;
5062         case HPD_PORT_C:
5063                 bit = SDE_PORTC_HOTPLUG;
5064                 break;
5065         case HPD_PORT_D:
5066                 bit = SDE_PORTD_HOTPLUG;
5067                 break;
5068         default:
5069                 MISSING_CASE(encoder->hpd_pin);
5070                 return false;
5071         }
5072
5073         return I915_READ(SDEISR) & bit;
5074 }
5075
5076 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5077 {
5078         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5079         u32 bit;
5080
5081         switch (encoder->hpd_pin) {
5082         case HPD_PORT_B:
5083                 bit = SDE_PORTB_HOTPLUG_CPT;
5084                 break;
5085         case HPD_PORT_C:
5086                 bit = SDE_PORTC_HOTPLUG_CPT;
5087                 break;
5088         case HPD_PORT_D:
5089                 bit = SDE_PORTD_HOTPLUG_CPT;
5090                 break;
5091         default:
5092                 MISSING_CASE(encoder->hpd_pin);
5093                 return false;
5094         }
5095
5096         return I915_READ(SDEISR) & bit;
5097 }
5098
5099 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5100 {
5101         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5102         u32 bit;
5103
5104         switch (encoder->hpd_pin) {
5105         case HPD_PORT_A:
5106                 bit = SDE_PORTA_HOTPLUG_SPT;
5107                 break;
5108         case HPD_PORT_E:
5109                 bit = SDE_PORTE_HOTPLUG_SPT;
5110                 break;
5111         default:
5112                 return cpt_digital_port_connected(encoder);
5113         }
5114
5115         return I915_READ(SDEISR) & bit;
5116 }
5117
5118 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5119 {
5120         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5121         u32 bit;
5122
5123         switch (encoder->hpd_pin) {
5124         case HPD_PORT_B:
5125                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5126                 break;
5127         case HPD_PORT_C:
5128                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5129                 break;
5130         case HPD_PORT_D:
5131                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5132                 break;
5133         default:
5134                 MISSING_CASE(encoder->hpd_pin);
5135                 return false;
5136         }
5137
5138         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5139 }
5140
5141 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5142 {
5143         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5144         u32 bit;
5145
5146         switch (encoder->hpd_pin) {
5147         case HPD_PORT_B:
5148                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5149                 break;
5150         case HPD_PORT_C:
5151                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5152                 break;
5153         case HPD_PORT_D:
5154                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5155                 break;
5156         default:
5157                 MISSING_CASE(encoder->hpd_pin);
5158                 return false;
5159         }
5160
5161         return I915_READ(PORT_HOTPLUG_STAT) & bit;
5162 }
5163
5164 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5165 {
5166         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5167
5168         if (encoder->hpd_pin == HPD_PORT_A)
5169                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5170         else
5171                 return ibx_digital_port_connected(encoder);
5172 }
5173
5174 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5175 {
5176         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5177
5178         if (encoder->hpd_pin == HPD_PORT_A)
5179                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5180         else
5181                 return cpt_digital_port_connected(encoder);
5182 }
5183
5184 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5185 {
5186         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5187
5188         if (encoder->hpd_pin == HPD_PORT_A)
5189                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5190         else
5191                 return cpt_digital_port_connected(encoder);
5192 }
5193
5194 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5195 {
5196         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5197
5198         if (encoder->hpd_pin == HPD_PORT_A)
5199                 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5200         else
5201                 return cpt_digital_port_connected(encoder);
5202 }
5203
5204 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5205 {
5206         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5207         u32 bit;
5208
5209         switch (encoder->hpd_pin) {
5210         case HPD_PORT_A:
5211                 bit = BXT_DE_PORT_HP_DDIA;
5212                 break;
5213         case HPD_PORT_B:
5214                 bit = BXT_DE_PORT_HP_DDIB;
5215                 break;
5216         case HPD_PORT_C:
5217                 bit = BXT_DE_PORT_HP_DDIC;
5218                 break;
5219         default:
5220                 MISSING_CASE(encoder->hpd_pin);
5221                 return false;
5222         }
5223
5224         return I915_READ(GEN8_DE_PORT_ISR) & bit;
5225 }
5226
5227 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5228                                      struct intel_digital_port *intel_dig_port)
5229 {
5230         enum port port = intel_dig_port->base.port;
5231
5232         return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5233 }
5234
5235 static const char *tc_type_name(enum tc_port_type type)
5236 {
5237         static const char * const names[] = {
5238                 [TC_PORT_UNKNOWN] = "unknown",
5239                 [TC_PORT_LEGACY] = "legacy",
5240                 [TC_PORT_TYPEC] = "typec",
5241                 [TC_PORT_TBT] = "tbt",
5242         };
5243
5244         if (WARN_ON(type >= ARRAY_SIZE(names)))
5245                 type = TC_PORT_UNKNOWN;
5246
5247         return names[type];
5248 }
5249
5250 static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
5251                                     struct intel_digital_port *intel_dig_port,
5252                                     bool is_legacy, bool is_typec, bool is_tbt)
5253 {
5254         enum port port = intel_dig_port->base.port;
5255         enum tc_port_type old_type = intel_dig_port->tc_type;
5256
5257         WARN_ON(is_legacy + is_typec + is_tbt != 1);
5258
5259         if (is_legacy)
5260                 intel_dig_port->tc_type = TC_PORT_LEGACY;
5261         else if (is_typec)
5262                 intel_dig_port->tc_type = TC_PORT_TYPEC;
5263         else if (is_tbt)
5264                 intel_dig_port->tc_type = TC_PORT_TBT;
5265         else
5266                 return;
5267
5268         /* Types are not supposed to be changed at runtime. */
5269         WARN_ON(old_type != TC_PORT_UNKNOWN &&
5270                 old_type != intel_dig_port->tc_type);
5271
5272         if (old_type != intel_dig_port->tc_type)
5273                 DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
5274                               tc_type_name(intel_dig_port->tc_type));
5275 }
5276
5277 /*
5278  * This function implements the first part of the Connect Flow described by our
5279  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
5280  * lanes, EDID, etc) is done as needed in the typical places.
5281  *
5282  * Unlike the other ports, type-C ports are not available to use as soon as we
5283  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
5284  * display, USB, etc. As a result, handshaking through FIA is required around
5285  * connect and disconnect to cleanly transfer ownership with the controller and
5286  * set the type-C power state.
5287  *
5288  * We could opt to only do the connect flow when we actually try to use the AUX
5289  * channels or do a modeset, then immediately run the disconnect flow after
5290  * usage, but there are some implications on this for a dynamic environment:
5291  * things may go away or change behind our backs. So for now our driver is
5292  * always trying to acquire ownership of the controller as soon as it gets an
5293  * interrupt (or polls state and sees a port is connected) and only gives it
5294  * back when it sees a disconnect. Implementation of a more fine-grained model
5295  * will require a lot of coordination with user space and thorough testing for
5296  * the extra possible cases.
5297  */
5298 static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
5299                                struct intel_digital_port *dig_port)
5300 {
5301         enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5302         u32 val;
5303
5304         if (dig_port->tc_type != TC_PORT_LEGACY &&
5305             dig_port->tc_type != TC_PORT_TYPEC)
5306                 return true;
5307
5308         val = I915_READ(PORT_TX_DFLEXDPPMS);
5309         if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
5310                 DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
5311                 WARN_ON(dig_port->tc_legacy_port);
5312                 return false;
5313         }
5314
5315         /*
5316          * This function may be called many times in a row without an HPD event
5317          * in between, so try to avoid the write when we can.
5318          */
5319         val = I915_READ(PORT_TX_DFLEXDPCSSS);
5320         if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
5321                 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5322                 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5323         }
5324
5325         /*
5326          * Now we have to re-check the live state, in case the port recently
5327          * became disconnected. Not necessary for legacy mode.
5328          */
5329         if (dig_port->tc_type == TC_PORT_TYPEC &&
5330             !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
5331                 DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
5332                 icl_tc_phy_disconnect(dev_priv, dig_port);
5333                 return false;
5334         }
5335
5336         return true;
5337 }
5338
5339 /*
5340  * See the comment at the connect function. This implements the Disconnect
5341  * Flow.
5342  */
5343 void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5344                            struct intel_digital_port *dig_port)
5345 {
5346         enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
5347
5348         if (dig_port->tc_type == TC_PORT_UNKNOWN)
5349                 return;
5350
	/*
	 * The TBT disconnection flow only requires reading the live status,
	 * which the caller has already done.
	 */
5355         if (dig_port->tc_type == TC_PORT_TYPEC ||
5356             dig_port->tc_type == TC_PORT_LEGACY) {
5357                 u32 val;
5358
5359                 val = I915_READ(PORT_TX_DFLEXDPCSSS);
5360                 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5361                 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5362         }
5363
5364         DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5365                       port_name(dig_port->base.port),
5366                       tc_type_name(dig_port->tc_type));
5367
5368         dig_port->tc_type = TC_PORT_UNKNOWN;
5369 }
5370
5371 /*
5372  * The type-C ports are different because even when they are connected, they may
5373  * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver, instead of adding the additional
 * concept of "usable" and making everything check for "connected and usable", we
5376  * define a port as "connected" when it is not only connected, but also when it
5377  * is usable by the rest of the driver. That maintains the old assumption that
5378  * connected ports are usable, and avoids exposing to the users objects they
5379  * can't really use.
5380  */
5381 static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5382                                   struct intel_digital_port *intel_dig_port)
5383 {
5384         enum port port = intel_dig_port->base.port;
5385         enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5386         bool is_legacy, is_typec, is_tbt;
5387         u32 dpsp;
5388
5389         /*
5390          * Complain if we got a legacy port HPD, but VBT didn't mark the port as
5391          * legacy. Treat the port as legacy from now on.
5392          */
5393         if (!intel_dig_port->tc_legacy_port &&
5394             I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
5395                 DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
5396                           port_name(port));
5397                 intel_dig_port->tc_legacy_port = true;
5398         }
5399         is_legacy = intel_dig_port->tc_legacy_port;
5400
5401         /*
	 * The spec says we shouldn't be using the ISR bits for distinguishing
5403          * between TC and TBT. We should use DFLEXDPSP.
5404          */
5405         dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5406         is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5407         is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5408
5409         if (!is_legacy && !is_typec && !is_tbt) {
5410                 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
5411
5412                 return false;
5413         }
5414
5415         icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5416                                 is_tbt);
5417
5418         if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5419                 return false;
5420
5421         return true;
5422 }
5423
5424 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5425 {
5426         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5427         struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5428
5429         if (intel_port_is_combophy(dev_priv, encoder->port))
5430                 return icl_combo_port_connected(dev_priv, dig_port);
5431         else if (intel_port_is_tc(dev_priv, encoder->port))
5432                 return icl_tc_port_connected(dev_priv, dig_port);
5433         else
5434                 MISSING_CASE(encoder->hpd_pin);
5435
5436         return false;
5437 }
5438
5439 /*
5440  * intel_digital_port_connected - is the specified port connected?
5441  * @encoder: intel_encoder
5442  *
5443  * In cases where there's a connector physically connected but it can't be used
5444  * by our hardware we also return false, since the rest of the driver should
5445  * pretty much treat the port as disconnected. This is relevant for type-C
5446  * (starting on ICL) where there's ownership involved.
5447  *
5448  * Return %true if port is connected, %false otherwise.
5449  */
5450 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5451 {
5452         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5453
5454         if (HAS_GMCH(dev_priv)) {
5455                 if (IS_GM45(dev_priv))
5456                         return gm45_digital_port_connected(encoder);
5457                 else
5458                         return g4x_digital_port_connected(encoder);
5459         }
5460
5461         if (INTEL_GEN(dev_priv) >= 11)
5462                 return icl_digital_port_connected(encoder);
5463         else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5464                 return spt_digital_port_connected(encoder);
5465         else if (IS_GEN9_LP(dev_priv))
5466                 return bxt_digital_port_connected(encoder);
5467         else if (IS_GEN(dev_priv, 8))
5468                 return bdw_digital_port_connected(encoder);
5469         else if (IS_GEN(dev_priv, 7))
5470                 return ivb_digital_port_connected(encoder);
5471         else if (IS_GEN(dev_priv, 6))
5472                 return snb_digital_port_connected(encoder);
5473         else if (IS_GEN(dev_priv, 5))
5474                 return ilk_digital_port_connected(encoder);
5475
5476         MISSING_CASE(INTEL_GEN(dev_priv));
5477         return false;
5478 }
5479
5480 bool intel_digital_port_connected(struct intel_encoder *encoder)
5481 {
5482         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5483         bool is_connected = false;
5484         intel_wakeref_t wakeref;
5485
5486         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5487                 is_connected = __intel_digital_port_connected(encoder);
5488
5489         return is_connected;
5490 }
5491
5492 static struct edid *
5493 intel_dp_get_edid(struct intel_dp *intel_dp)
5494 {
5495         struct intel_connector *intel_connector = intel_dp->attached_connector;
5496
5497         /* use cached edid if we have one */
5498         if (intel_connector->edid) {
5499                 /* invalid edid */
5500                 if (IS_ERR(intel_connector->edid))
5501                         return NULL;
5502
5503                 return drm_edid_duplicate(intel_connector->edid);
	}

	return drm_get_edid(&intel_connector->base, &intel_dp->aux.ddc);
5507 }
5508
5509 static void
5510 intel_dp_set_edid(struct intel_dp *intel_dp)
5511 {
5512         struct intel_connector *intel_connector = intel_dp->attached_connector;
5513         struct edid *edid;
5514
5515         intel_dp_unset_edid(intel_dp);
5516         edid = intel_dp_get_edid(intel_dp);
5517         intel_connector->detect_edid = edid;
5518
5519         intel_dp->has_audio = drm_detect_monitor_audio(edid);
5520         drm_dp_cec_set_edid(&intel_dp->aux, edid);
5521 }
5522
5523 static void
5524 intel_dp_unset_edid(struct intel_dp *intel_dp)
5525 {
5526         struct intel_connector *intel_connector = intel_dp->attached_connector;
5527
5528         drm_dp_cec_unset_edid(&intel_dp->aux);
5529         kfree(intel_connector->detect_edid);
5530         intel_connector->detect_edid = NULL;
5531
5532         intel_dp->has_audio = false;
5533 }
5534
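/*
 * ->detect_ctx() hook for DP connectors. Determines physical
 * connectivity (eDP panels always count as connected), refreshes
 * DPCD-derived state (link rates, DSC caps, MST), forces a link status
 * check for external sinks that don't signal loss of sync via IRQ_HPD,
 * and (re)reads the EDID. Returns a connector_status_* value, or a
 * negative error propagated from link retraining.
 */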
5535 static int
5536 intel_dp_detect(struct drm_connector *connector,
5537                 struct drm_modeset_acquire_ctx *ctx,
5538                 bool force)
5539 {
5540         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5541         struct intel_dp *intel_dp = intel_attached_dp(connector);
5542         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5543         struct intel_encoder *encoder = &dig_port->base;
5544         enum drm_connector_status status;
5545
5546         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5547                       connector->base.id, connector->name);
5548         WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5549
5550         /* Can't disconnect eDP */
5551         if (intel_dp_is_edp(intel_dp))
5552                 status = edp_detect(intel_dp);
5553         else if (intel_digital_port_connected(encoder))
5554                 status = intel_dp_detect_dpcd(intel_dp);
5555         else
5556                 status = connector_status_disconnected;
5557
5558         if (status == connector_status_disconnected) {
5559                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5560                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5561
5562                 if (intel_dp->is_mst) {
5563                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5564                                       intel_dp->is_mst,
5565                                       intel_dp->mst_mgr.mst_state);
5566                         intel_dp->is_mst = false;
5567                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5568                                                         intel_dp->is_mst);
5569                 }
5570
5571                 goto out;
5572         }
5573
5574         if (intel_dp->reset_link_params) {
5575                 /* Initial max link lane count */
5576                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5577
5578                 /* Initial max link rate */
5579                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5580
5581                 intel_dp->reset_link_params = false;
5582         }
5583
5584         intel_dp_print_rates(intel_dp);
5585
5586         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5587         if (INTEL_GEN(dev_priv) >= 11)
5588                 intel_dp_get_dsc_sink_cap(intel_dp);
5589
5590         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5591                          drm_dp_is_branch(intel_dp->dpcd));
5592
5593         intel_dp_configure_mst(intel_dp);
5594
5595         if (intel_dp->is_mst) {
5596                 /*
5597                  * If we are in MST mode then this connector
5598                  * won't appear connected or have an
5599                  * EDID attached to it.
5600                  */
5601                 status = connector_status_disconnected;
5602                 goto out;
5603         }
5604
5605         /*
5606          * Some external monitors do not signal loss of link synchronization
5607          * with an IRQ_HPD, so force a link status check.
5608          */
5609         if (!intel_dp_is_edp(intel_dp)) {
5610                 int ret;
5611
5612                 ret = intel_dp_retrain_link(encoder, ctx);
5613                 if (ret)
5614                         return ret;
5615         }
5616
5617         /*
5618          * Clear the NACK and defer counts so that we get their exact
5619          * values while reading the EDID, as required by DP compliance
5620          * tests 4.2.2.4 and 4.2.2.5.
5621          */
5622         intel_dp->aux.i2c_nack_count = 0;
5623         intel_dp->aux.i2c_defer_count = 0;
5624
5625         intel_dp_set_edid(intel_dp);
5626         if (intel_dp_is_edp(intel_dp) ||
5627             to_intel_connector(connector)->detect_edid)
5628                 status = connector_status_connected;
5629
5630         intel_dp_check_service_irq(intel_dp);
5631
5632 out:
5633         if (status != connector_status_connected && !intel_dp->is_mst)
5634                 intel_dp_unset_edid(intel_dp);
5635
5636         return status;
5637 }
5638
5639 static void
5640 intel_dp_force(struct drm_connector *connector)
5641 {
5642         struct intel_dp *intel_dp = intel_attached_dp(connector);
5643         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5644         struct intel_encoder *intel_encoder = &dig_port->base;
5645         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5646         enum intel_display_power_domain aux_domain =
5647                 intel_aux_power_domain(dig_port);
5648         intel_wakeref_t wakeref;
5649
5650         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5651                       connector->base.id, connector->name);
5652         intel_dp_unset_edid(intel_dp);
5653
5654         if (connector->status != connector_status_connected)
5655                 return;
5656
5657         wakeref = intel_display_power_get(dev_priv, aux_domain);
5658
5659         intel_dp_set_edid(intel_dp);
5660
5661         intel_display_power_put(dev_priv, aux_domain, wakeref);
5662 }
5663
5664 static int intel_dp_get_modes(struct drm_connector *connector)
5665 {
5666         struct intel_connector *intel_connector = to_intel_connector(connector);
5667         struct edid *edid;
5668
5669         edid = intel_connector->detect_edid;
5670         if (edid) {
5671                 int ret = intel_connector_update_modes(connector, edid);
5672                 if (ret)
5673                         return ret;
5674         }
5675
5676         /* if eDP has no EDID, fall back to fixed mode */
5677         if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5678             intel_connector->panel.fixed_mode) {
5679                 struct drm_display_mode *mode;
5680
5681                 mode = drm_mode_duplicate(connector->dev,
5682                                           intel_connector->panel.fixed_mode);
5683                 if (mode) {
5684                         drm_mode_probed_add(connector, mode);
5685                         return 1;
5686                 }
5687         }
5688
5689         return 0;
5690 }
5691
5692 static int
5693 intel_dp_connector_register(struct drm_connector *connector)
5694 {
5695         struct intel_dp *intel_dp = intel_attached_dp(connector);
5696         struct drm_device *dev = connector->dev;
5697         int ret;
5698
5699         ret = intel_connector_register(connector);
5700         if (ret)
5701                 return ret;
5702
5703         i915_debugfs_connector_add(connector);
5704
5705         DRM_DEBUG_KMS("registering %s bus for %s\n",
5706                       intel_dp->aux.name, connector->kdev->kobj.name);
5707
5708         intel_dp->aux.dev = connector->kdev;
5709         ret = drm_dp_aux_register(&intel_dp->aux);
5710         if (!ret)
5711                 drm_dp_cec_register_connector(&intel_dp->aux,
5712                                               connector->name, dev->dev);
5713         return ret;
5714 }
5715
5716 static void
5717 intel_dp_connector_unregister(struct drm_connector *connector)
5718 {
5719         struct intel_dp *intel_dp = intel_attached_dp(connector);
5720
5721         drm_dp_cec_unregister_connector(&intel_dp->aux);
5722         drm_dp_aux_unregister(&intel_dp->aux);
5723         intel_connector_unregister(connector);
5724 }
5725
5726 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5727 {
5728         struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5729         struct intel_dp *intel_dp = &intel_dig_port->dp;
5730
5731         intel_dp_mst_encoder_cleanup(intel_dig_port);
5732         if (intel_dp_is_edp(intel_dp)) {
5733                 intel_wakeref_t wakeref;
5734
5735                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5736                 /*
5737                  * vdd might still be enabled due to the delayed vdd off.
5738                  * Make sure vdd is actually turned off here.
5739                  */
5740                 with_pps_lock(intel_dp, wakeref)
5741                         edp_panel_vdd_off_sync(intel_dp);
5742
5743                 if (intel_dp->edp_notifier.notifier_call) {
5744                         unregister_reboot_notifier(&intel_dp->edp_notifier);
5745                         intel_dp->edp_notifier.notifier_call = NULL;
5746                 }
5747         }
5748
5749         intel_dp_aux_fini(intel_dp);
5750 }
5751
5752 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5753 {
5754         intel_dp_encoder_flush_work(encoder);
5755
5756         drm_encoder_cleanup(encoder);
5757         kfree(enc_to_dig_port(encoder));
5758 }
5759
5760 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5761 {
5762         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5763         intel_wakeref_t wakeref;
5764
5765         if (!intel_dp_is_edp(intel_dp))
5766                 return;
5767
5768         /*
5769                  * vdd might still be enabled due to the delayed vdd off.
5770          * Make sure vdd is actually turned off here.
5771          */
5772         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5773         with_pps_lock(intel_dp, wakeref)
5774                 edp_panel_vdd_off_sync(intel_dp);
5775 }
5776
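/*
 * Wait (interruptibly) for up to @timeout ms for a CP_IRQ to arrive,
 * detected by comparing the cached cp_irq_count snapshot against the
 * live atomic counter incremented when a CP_IRQ is serviced. On timeout
 * we only log, since callers re-check message availability themselves.
 */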
5777 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5778 {
5779         long ret;
5780
5781 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5782         ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5783                                                msecs_to_jiffies(timeout));
5784
5785         if (!ret)
5786                 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5787 }
5788
5789 static
5790 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5791                                 u8 *an)
5792 {
5793         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5794         static const struct drm_dp_aux_msg msg = {
5795                 .request = DP_AUX_NATIVE_WRITE,
5796                 .address = DP_AUX_HDCP_AKSV,
5797                 .size = DRM_HDCP_KSV_LEN,
5798         };
5799         u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5800         ssize_t dpcd_ret;
5801         int ret;
5802
5803         /* Output An first, that's easy */
5804         dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5805                                      an, DRM_HDCP_AN_LEN);
5806         if (dpcd_ret != DRM_HDCP_AN_LEN) {
5807                 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5808                               dpcd_ret);
5809                 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5810         }
5811
5812         /*
5813          * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5814          * order to get it on the wire, we need to create the AUX header as if
5815          * we were writing the data, and then tickle the hardware to output the
5816          * data once the header is sent out.
5817          */
5818         intel_dp_aux_header(txbuf, &msg);
5819
5820         ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5821                                 rxbuf, sizeof(rxbuf),
5822                                 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5823         if (ret < 0) {
5824                 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5825                 return ret;
5826         } else if (ret == 0) {
5827                 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5828                 return -EIO;
5829         }
5830
5831         reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5832         if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5833                 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5834                               reply);
5835                 return -EIO;
5836         }
5837         return 0;
5838 }
5839
5840 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5841                                    u8 *bksv)
5842 {
5843         ssize_t ret;
5844         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5845                                DRM_HDCP_KSV_LEN);
5846         if (ret != DRM_HDCP_KSV_LEN) {
5847                 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5848                 return ret >= 0 ? -EIO : ret;
5849         }
5850         return 0;
5851 }
5852
5853 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5854                                       u8 *bstatus)
5855 {
5856         ssize_t ret;
5857         /*
5858          * For some reason the HDMI and DP HDCP specs call this register
5859          * definition by different names. In the HDMI spec, it's called BSTATUS,
5860          * but in DP it's called BINFO.
5861          */
5862         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5863                                bstatus, DRM_HDCP_BSTATUS_LEN);
5864         if (ret != DRM_HDCP_BSTATUS_LEN) {
5865                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5866                 return ret >= 0 ? -EIO : ret;
5867         }
5868         return 0;
5869 }
5870
5871 static
5872 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5873                              u8 *bcaps)
5874 {
5875         ssize_t ret;
5876
5877         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5878                                bcaps, 1);
5879         if (ret != 1) {
5880                 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5881                 return ret >= 0 ? -EIO : ret;
5882         }
5883
5884         return 0;
5885 }
5886
5887 static
5888 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5889                                    bool *repeater_present)
5890 {
5891         ssize_t ret;
5892         u8 bcaps;
5893
5894         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5895         if (ret)
5896                 return ret;
5897
5898         *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5899         return 0;
5900 }
5901
5902 static
5903 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5904                                 u8 *ri_prime)
5905 {
5906         ssize_t ret;
5907         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5908                                ri_prime, DRM_HDCP_RI_LEN);
5909         if (ret != DRM_HDCP_RI_LEN) {
5910                 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5911                 return ret >= 0 ? -EIO : ret;
5912         }
5913         return 0;
5914 }
5915
5916 static
5917 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5918                                  bool *ksv_ready)
5919 {
5920         ssize_t ret;
5921         u8 bstatus;
5922         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5923                                &bstatus, 1);
5924         if (ret != 1) {
5925                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5926                 return ret >= 0 ? -EIO : ret;
5927         }
5928         *ksv_ready = bstatus & DP_BSTATUS_READY;
5929         return 0;
5930 }
5931
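/*
 * Drain the KSV FIFO in 15-byte windows. For example, with 7 downstream
 * devices the loop below issues reads of 15, 15 and 5 bytes
 * (3 + 3 + 1 KSVs at DRM_HDCP_KSV_LEN == 5 bytes each).
 */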
5932 static
5933 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5934                                 int num_downstream, u8 *ksv_fifo)
5935 {
5936         ssize_t ret;
5937         int i;
5938
5939         /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5940         for (i = 0; i < num_downstream; i += 3) {
5941                 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5942                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5943                                        DP_AUX_HDCP_KSV_FIFO,
5944                                        ksv_fifo + i * DRM_HDCP_KSV_LEN,
5945                                        len);
5946                 if (ret != len) {
5947                         DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5948                                       i, ret);
5949                         return ret >= 0 ? -EIO : ret;
5950                 }
5951         }
5952         return 0;
5953 }
5954
5955 static
5956 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5957                                     int i, u32 *part)
5958 {
5959         ssize_t ret;
5960
5961         if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5962                 return -EINVAL;
5963
5964         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5965                                DP_AUX_HDCP_V_PRIME(i), part,
5966                                DRM_HDCP_V_PRIME_PART_LEN);
5967         if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5968                 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5969                 return ret >= 0 ? -EIO : ret;
5970         }
5971         return 0;
5972 }
5973
5974 static
5975 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5976                                     bool enable)
5977 {
5978         /* Not used for single stream DisplayPort setups */
5979         return 0;
5980 }
5981
5982 static
5983 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5984 {
5985         ssize_t ret;
5986         u8 bstatus;
5987
5988         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5989                                &bstatus, 1);
5990         if (ret != 1) {
5991                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5992                 return false;
5993         }
5994
5995         return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5996 }
5997
5998 static
5999 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
6000                           bool *hdcp_capable)
6001 {
6002         ssize_t ret;
6003         u8 bcaps;
6004
6005         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
6006         if (ret)
6007                 return ret;
6008
6009         *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
6010         return 0;
6011 }
6012
6013 struct hdcp2_dp_errata_stream_type {
6014         u8      msg_id;
6015         u8      stream_type;
6016 } __packed;
6017
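/*
 * Table describing each HDCP 2.2 message as carried over DP: the DPCD
 * offset it is written to / read from, whether its arrival can be
 * detected through RxStatus (msg_detectable), and how long to wait for
 * it. timeout2 is the longer wait used for AKE_SEND_HPRIME when the
 * receiver is not yet paired.
 */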
6018 static struct hdcp2_dp_msg_data {
6019         u8 msg_id;
6020         u32 offset;
6021         bool msg_detectable;
6022         u32 timeout;
6023         u32 timeout2; /* Added for non_paired situation */
6024 } hdcp2_msg_data[] = {
6025         {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
6026         {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
6027          false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
6028         {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
6029          false, 0, 0},
6030         {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
6031          false, 0, 0},
6032         {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
6033          true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
6034          HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
6035         {HDCP_2_2_AKE_SEND_PAIRING_INFO,
6036          DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
6037          HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
6038         {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
6039         {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
6040          false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
6041         {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
6042          0, 0},
6043         {HDCP_2_2_REP_SEND_RECVID_LIST,
6044          DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
6045          HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
6046         {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
6047          0, 0},
6048         {HDCP_2_2_REP_STREAM_MANAGE,
6049          DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
6050          0, 0},
6051         {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
6052          false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
6053 /* local define to shovel this through the write_2_2 interface */
6054 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE  50
6055         {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
6056          DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
6057          0, 0},
6058 };
6059
6060 static inline
6061 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
6062                                   u8 *rx_status)
6063 {
6064         ssize_t ret;
6065
6066         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6067                                DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
6068                                HDCP_2_2_DP_RXSTATUS_LEN);
6069         if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
6070                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6071                 return ret >= 0 ? -EIO : ret;
6072         }
6073
6074         return 0;
6075 }
6076
6077 static
6078 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
6079                                   u8 msg_id, bool *msg_ready)
6080 {
6081         u8 rx_status;
6082         int ret;
6083
6084         *msg_ready = false;
6085         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6086         if (ret < 0)
6087                 return ret;
6088
6089         switch (msg_id) {
6090         case HDCP_2_2_AKE_SEND_HPRIME:
6091                 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6092                         *msg_ready = true;
6093                 break;
6094         case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6095                 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6096                         *msg_ready = true;
6097                 break;
6098         case HDCP_2_2_REP_SEND_RECVID_LIST:
6099                 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6100                         *msg_ready = true;
6101                 break;
6102         default:
6103                 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6104                 return -EINVAL;
6105         }
6106
6107         return 0;
6108 }
6109
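/*
 * Wait for a specific HDCP 2.2 message to become available: for
 * non-detectable messages simply sleep for the full timeout and assume
 * the message is there; for detectable ones wait for a CP_IRQ and then
 * poll RxStatus, returning -ETIMEDOUT if the message never shows up.
 */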
6110 static ssize_t
6111 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
6112                             struct hdcp2_dp_msg_data *hdcp2_msg_data)
6113 {
6114         struct intel_dp *dp = &intel_dig_port->dp;
6115         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6116         u8 msg_id = hdcp2_msg_data->msg_id;
6117         int ret, timeout;
6118         bool msg_ready = false;
6119
6120         if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
6121                 timeout = hdcp2_msg_data->timeout2;
6122         else
6123                 timeout = hdcp2_msg_data->timeout;
6124
6125         /*
6126          * There is no way to detect the arrival of CERT, LPRIME and
6127          * STREAM_READY, so wait for the full timeout and then read the msg.
6128          */
6129         if (!hdcp2_msg_data->msg_detectable) {
6130                 mdelay(timeout);
6131                 ret = 0;
6132         } else {
6133                 /*
6134                  * As we want to check the msg availability at the timeout,
6135                  * ignore the timeout of the CP_IRQ wait.
6136                  */
6137                 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6138                 ret = hdcp2_detect_msg_availability(intel_dig_port,
6139                                                     msg_id, &msg_ready);
6140                 if (!msg_ready)
6141                         ret = -ETIMEDOUT;
6142         }
6143
6144         if (ret)
6145                 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6146                               hdcp2_msg_data->msg_id, ret, timeout);
6147
6148         return ret;
6149 }
6150
6151 static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6152 {
6153         int i;
6154
6155         for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6156                 if (hdcp2_msg_data[i].msg_id == msg_id)
6157                         return &hdcp2_msg_data[i];
6158
6159         return NULL;
6160 }
6161
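/*
 * Write an HDCP 2.2 message over DP. The leading msg_id byte is used
 * only to look up the per-message DPCD offset and is not transmitted
 * (DP carries each message at a fixed offset instead). The payload is
 * pushed out in DP_AUX_MAX_PAYLOAD_BYTES (16 byte) chunks, and
 * cp_irq_count is snapshotted first so a subsequent wait can detect
 * the resulting CP_IRQ.
 */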
6162 static
6163 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6164                              void *buf, size_t size)
6165 {
6166         struct intel_dp *dp = &intel_dig_port->dp;
6167         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6168         unsigned int offset;
6169         u8 *byte = buf;
6170         ssize_t ret, bytes_to_write, len;
6171         struct hdcp2_dp_msg_data *hdcp2_msg_data;
6172
6173         hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6174         if (!hdcp2_msg_data)
6175                 return -EINVAL;
6176
6177         offset = hdcp2_msg_data->offset;
6178
6179         /* No msg_id in DP HDCP2.2 msgs */
6180         bytes_to_write = size - 1;
6181         byte++;
6182
6183         hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6184
6185         while (bytes_to_write) {
6186                 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6187                                 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6188
6189                 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6190                                         offset, (void *)byte, len);
6191                 if (ret < 0)
6192                         return ret;
6193
6194                 bytes_to_write -= ret;
6195                 byte += ret;
6196                 offset += ret;
6197         }
6198
6199         return size;
6200 }
6201
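/*
 * The REP_SEND_RECVID_LIST message is variable length, so derive its
 * size from the RxInfo device count: the fixed-size fields plus
 * dev_cnt receiver IDs of HDCP_2_2_RECEIVER_ID_LEN (5) bytes each,
 * with dev_cnt clamped to HDCP_2_2_MAX_DEVICE_COUNT.
 */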
6202 static
6203 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6204 {
6205         u8 rx_info[HDCP_2_2_RXINFO_LEN];
6206         u32 dev_cnt;
6207         ssize_t ret;
6208
6209         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6210                                DP_HDCP_2_2_REG_RXINFO_OFFSET,
6211                                (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6212         if (ret != HDCP_2_2_RXINFO_LEN)
6213                 return ret >= 0 ? -EIO : ret;
6214
6215         dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6216                    HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6217
6218         if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6219                 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6220
6221         ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6222                 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6223                 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6224
6225         return ret;
6226 }
6227
6228 static
6229 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6230                             u8 msg_id, void *buf, size_t size)
6231 {
6232         unsigned int offset;
6233         u8 *byte = buf;
6234         ssize_t ret, bytes_to_recv, len;
6235         struct hdcp2_dp_msg_data *hdcp2_msg_data;
6236
6237         hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6238         if (!hdcp2_msg_data)
6239                 return -EINVAL;
6240         offset = hdcp2_msg_data->offset;
6241
6242         ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6243         if (ret < 0)
6244                 return ret;
6245
6246         if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6247                 ret = get_receiver_id_list_size(intel_dig_port);
6248                 if (ret < 0)
6249                         return ret;
6250
6251                 size = ret;
6252         }
6253         bytes_to_recv = size - 1;
6254
6255         /* DP adaptation msgs have no msg_id */
6256         byte++;
6257
6258         while (bytes_to_recv) {
6259                 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6260                       DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6261
6262                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6263                                        (void *)byte, len);
6264                 if (ret < 0) {
6265                         DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6266                         return ret;
6267                 }
6268
6269                 bytes_to_recv -= ret;
6270                 byte += ret;
6271                 offset += ret;
6272         }
6273         byte = buf;
6274         *byte = msg_id;
6275
6276         return size;
6277 }
6278
6279 static
6280 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6281                                       bool is_repeater, u8 content_type)
6282 {
6283         struct hdcp2_dp_errata_stream_type stream_type_msg;
6284
6285         if (is_repeater)
6286                 return 0;
6287
6288         /*
6289          * Errata for DP: As the stream type is used for encryption, the
6290          * receiver must be told the stream type in order to decrypt the
6291          * content.
6292          * A repeater is told the stream type as a part of its
6293          * authentication later on.
6294          */
6295         stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6296         stream_type_msg.stream_type = content_type;
6297
6298         return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6299                                         sizeof(stream_type_msg));
6300 }
6301
6302 static
6303 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6304 {
6305         u8 rx_status;
6306         int ret;
6307
6308         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6309         if (ret)
6310                 return ret;
6311
6312         if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6313                 ret = HDCP_REAUTH_REQUEST;
6314         else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6315                 ret = HDCP_LINK_INTEGRITY_FAILURE;
6316         else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6317                 ret = HDCP_TOPOLOGY_CHANGE;
6318
6319         return ret;
6320 }
6321
6322 static
6323 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6324                            bool *capable)
6325 {
6326         u8 rx_caps[3];
6327         int ret;
6328
6329         *capable = false;
6330         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6331                                DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6332                                rx_caps, HDCP_2_2_RXCAPS_LEN);
6333         if (ret != HDCP_2_2_RXCAPS_LEN)
6334                 return ret >= 0 ? -EIO : ret;
6335
6336         if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6337             HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6338                 *capable = true;
6339
6340         return 0;
6341 }
6342
6343 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6344         .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6345         .read_bksv = intel_dp_hdcp_read_bksv,
6346         .read_bstatus = intel_dp_hdcp_read_bstatus,
6347         .repeater_present = intel_dp_hdcp_repeater_present,
6348         .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6349         .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6350         .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6351         .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6352         .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6353         .check_link = intel_dp_hdcp_check_link,
6354         .hdcp_capable = intel_dp_hdcp_capable,
6355         .write_2_2_msg = intel_dp_hdcp2_write_msg,
6356         .read_2_2_msg = intel_dp_hdcp2_read_msg,
6357         .config_stream_type = intel_dp_hdcp2_config_stream_type,
6358         .check_2_2_link = intel_dp_hdcp2_check_link,
6359         .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6360         .protocol = HDCP_PROTOCOL_DP,
6361 };
6362
6363 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6364 {
6365         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6366         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6367
6368         lockdep_assert_held(&dev_priv->pps_mutex);
6369
6370         if (!edp_have_panel_vdd(intel_dp))
6371                 return;
6372
6373         /*
6374          * The VDD bit needs a power domain reference, so if the bit is
6375          * already enabled when we boot or resume, grab this reference and
6376          * schedule a vdd off, so we don't hold on to the reference
6377          * indefinitely.
6378          */
6379         DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6380         intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6381
6382         edp_panel_vdd_schedule_off(intel_dp);
6383 }
6384
6385 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6386 {
6387         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6388         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6389         enum pipe pipe;
6390
6391         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6392                                   encoder->port, &pipe))
6393                 return pipe;
6394
6395         return INVALID_PIPE;
6396 }
6397
6398 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6399 {
6400         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6401         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6402         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6403         intel_wakeref_t wakeref;
6404
6405         if (!HAS_DDI(dev_priv))
6406                 intel_dp->DP = I915_READ(intel_dp->output_reg);
6407
6408         if (lspcon->active)
6409                 lspcon_resume(lspcon);
6410
6411         intel_dp->reset_link_params = true;
6412
6413         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6414             !intel_dp_is_edp(intel_dp))
6415                 return;
6416
6417         with_pps_lock(intel_dp, wakeref) {
6418                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6419                         intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6420
6421                 if (intel_dp_is_edp(intel_dp)) {
6422                         /*
6423                          * Reinit the power sequencer, in case BIOS did
6424                          * something nasty with it.
6425                          */
6426                         intel_dp_pps_init(intel_dp);
6427                         intel_edp_panel_vdd_sanitize(intel_dp);
6428                 }
6429         }
6430 }
6431
6432 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6433         .force = intel_dp_force,
6434         .fill_modes = drm_helper_probe_single_connector_modes,
6435         .atomic_get_property = intel_digital_connector_atomic_get_property,
6436         .atomic_set_property = intel_digital_connector_atomic_set_property,
6437         .late_register = intel_dp_connector_register,
6438         .early_unregister = intel_dp_connector_unregister,
6439         .destroy = intel_connector_destroy,
6440         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6441         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6442 };
6443
6444 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6445         .detect_ctx = intel_dp_detect,
6446         .get_modes = intel_dp_get_modes,
6447         .mode_valid = intel_dp_mode_valid,
6448         .atomic_check = intel_digital_connector_atomic_check,
6449 };
6450
6451 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6452         .reset = intel_dp_encoder_reset,
6453         .destroy = intel_dp_encoder_destroy,
6454 };
6455
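/**
 * intel_dp_hpd_pulse - handle an HPD pulse on a DP digital port
 * @intel_dig_port: the port that received the pulse
 * @long_hpd: whether the pulse was long (plug/unplug) or short (IRQ_HPD)
 *
 * Long pulses only flag the link parameters for re-negotiation here and
 * leave detection to the caller. Short pulses are serviced inline: MST
 * sideband handling, or the SST sink IRQ / link status checks. Returns
 * IRQ_HANDLED if the pulse was fully consumed, IRQ_NONE if the caller
 * should go on to do full hotplug processing.
 */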
6456 enum irqreturn
6457 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6458 {
6459         struct intel_dp *intel_dp = &intel_dig_port->dp;
6460
6461         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6462                 /*
6463                  * vdd off can generate a long pulse on eDP which
6464                  * would require vdd on to handle it, and thus we
6465                  * would end up in an endless cycle of
6466                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6467                  */
6468                 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
6469                               port_name(intel_dig_port->base.port));
6470                 return IRQ_HANDLED;
6471         }
6472
6473         DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
6474                       port_name(intel_dig_port->base.port),
6475                       long_hpd ? "long" : "short");
6476
6477         if (long_hpd) {
6478                 intel_dp->reset_link_params = true;
6479                 return IRQ_NONE;
6480         }
6481
6482         if (intel_dp->is_mst) {
6483                 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6484                         /*
6485                          * If we were in MST mode, and device is not
6486                          * there, get out of MST mode
6487                          */
6488                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6489                                       intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6490                         intel_dp->is_mst = false;
6491                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6492                                                         intel_dp->is_mst);
6493
6494                         return IRQ_NONE;
6495                 }
6496         }
6497
6498         if (!intel_dp->is_mst) {
6499                 bool handled;
6500
6501                 handled = intel_dp_short_pulse(intel_dp);
6502
6503                 if (!handled)
6504                         return IRQ_NONE;
6505         }
6506
6507         return IRQ_HANDLED;
6508 }
6509
6510 /* check the VBT to see whether the eDP is on a given port */
6511 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6512 {
6513         /*
6514          * eDP is not supported on g4x, so bail out early just
6515          * for a bit of extra safety in case the VBT is bonkers.
6516          */
6517         if (INTEL_GEN(dev_priv) < 5)
6518                 return false;
6519
6520         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6521                 return true;
6522
6523         return intel_bios_is_port_edp(dev_priv, port);
6524 }
6525
6526 static void
6527 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6528 {
6529         struct drm_i915_private *dev_priv = to_i915(connector->dev);
6530         enum port port = dp_to_dig_port(intel_dp)->base.port;
6531
6532         if (!IS_G4X(dev_priv) && port != PORT_A)
6533                 intel_attach_force_audio_property(connector);
6534
6535         intel_attach_broadcast_rgb_property(connector);
6536         if (HAS_GMCH(dev_priv))
6537                 drm_connector_attach_max_bpc_property(connector, 6, 10);
6538         else if (INTEL_GEN(dev_priv) >= 5)
6539                 drm_connector_attach_max_bpc_property(connector, 6, 12);
6540
6541         if (intel_dp_is_edp(intel_dp)) {
6542                 u32 allowed_scalers;
6543
6544                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6545                 if (!HAS_GMCH(dev_priv))
6546                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6547
6548                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6549
6550                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6551
6552         }
6553 }
6554
6555 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6556 {
6557         intel_dp->panel_power_off_time = ktime_get_boottime();
6558         intel_dp->last_power_on = jiffies;
6559         intel_dp->last_backlight_off = jiffies;
6560 }
6561
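/*
 * Read the panel power sequencing delays back out of the PPS registers
 * into @seq. All fields end up in 100usec units; t11_t12 is stored by
 * the hardware with 100ms granularity (in PP_DIV, or via
 * BXT_POWER_CYCLE_DELAY_MASK in the control register where PP_DIV
 * doesn't exist), hence the * 1000 scaling below.
 */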
6562 static void
6563 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6564 {
6565         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6566         u32 pp_on, pp_off, pp_ctl;
6567         struct pps_registers regs;
6568
6569         intel_pps_get_registers(intel_dp, &regs);
6570
6571         pp_ctl = ironlake_get_pp_control(intel_dp);
6572
6573         /* Ensure PPS is unlocked */
6574         if (!HAS_DDI(dev_priv))
6575                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6576
6577         pp_on = I915_READ(regs.pp_on);
6578         pp_off = I915_READ(regs.pp_off);
6579
6580         /* Pull timing values out of registers */
6581         seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6582         seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6583         seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6584         seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6585
6586         if (i915_mmio_reg_valid(regs.pp_div)) {
6587                 u32 pp_div;
6588
6589                 pp_div = I915_READ(regs.pp_div);
6590
6591                 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6592         } else {
6593                 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6594         }
6595 }
6596
6597 static void
6598 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6599 {
6600         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6601                       state_name,
6602                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6603 }
6604
6605 static void
6606 intel_pps_verify_state(struct intel_dp *intel_dp)
6607 {
6608         struct edp_power_seq hw;
6609         struct edp_power_seq *sw = &intel_dp->pps_delays;
6610
6611         intel_pps_readout_hw_state(intel_dp, &hw);
6612
6613         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6614             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6615                 DRM_ERROR("PPS state mismatch\n");
6616                 intel_pps_dump_state("sw", sw);
6617                 intel_pps_dump_state("hw", &hw);
6618         }
6619 }
6620
6621 static void
6622 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6623 {
6624         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6625         struct edp_power_seq cur, vbt, spec,
6626                 *final = &intel_dp->pps_delays;
6627
6628         lockdep_assert_held(&dev_priv->pps_mutex);
6629
6630         /* already initialized? */
6631         if (final->t11_t12 != 0)
6632                 return;
6633
6634         intel_pps_readout_hw_state(intel_dp, &cur);
6635
6636         intel_pps_dump_state("cur", &cur);
6637
6638         vbt = dev_priv->vbt.edp.pps;
6639         /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
6640          * of 500ms appears to be too short. Occasionally the panel
6641          * just fails to power back on. Increasing the delay to 1300ms
6642          * seems sufficient to avoid this problem.
6643          */
6644         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6645                 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6646                 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6647                               vbt.t11_t12);
6648         }
6649         /* T11_T12 delay is special and actually in units of 100ms, but zero
6650          * based in the hw (so we need to add 100 ms). But the sw vbt
6651          * table multiplies it by 1000 to make it in units of 100usec,
6652          * too. */
6653         vbt.t11_t12 += 100 * 10;
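        /*
         * Example: a VBT power cycle delay of 500ms is stored as 5000
         * (100usec units); the zero-base correction above adds 1000,
         * yielding 6000, i.e. an effective 600ms delay.
         */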
6654
6655         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6656          * our hw here, which are all in 100usec. */
6657         spec.t1_t3 = 210 * 10;
6658         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6659         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6660         spec.t10 = 500 * 10;
6661         /* This one is special and actually in units of 100ms, but zero
6662          * based in the hw (so we need to add 100 ms). But the sw vbt
6663          * table multiplies it by 1000 to make it in units of 100usec,
6664          * too. */
6665         spec.t11_t12 = (510 + 100) * 10;
6666
6667         intel_pps_dump_state("vbt", &vbt);
6668
6669         /* Use the max of the register settings and vbt. If both are
6670          * unset, fall back to the spec limits. */
6671 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
6672                                        spec.field : \
6673                                        max(cur.field, vbt.field))
6674         assign_final(t1_t3);
6675         assign_final(t8);
6676         assign_final(t9);
6677         assign_final(t10);
6678         assign_final(t11_t12);
6679 #undef assign_final
6680
6681 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
6682         intel_dp->panel_power_up_delay = get_delay(t1_t3);
6683         intel_dp->backlight_on_delay = get_delay(t8);
6684         intel_dp->backlight_off_delay = get_delay(t9);
6685         intel_dp->panel_power_down_delay = get_delay(t10);
6686         intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6687 #undef get_delay
6688
6689         DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6690                       intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6691                       intel_dp->panel_power_cycle_delay);
6692
6693         DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6694                       intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6695
6696         /*
6697          * We override the HW backlight delays to 1 because we do manual waits
6698          * on them. For T8, even BSpec recommends doing it. For T9, if we
6699          * don't do this, we'll end up waiting for the backlight off delay
6700          * twice: once when we do the manual sleep, and once when we disable
6701          * the panel and wait for the PP_STATUS bit to become zero.
6702          */
6703         final->t8 = 1;
6704         final->t9 = 1;
6705
6706         /*
6707          * HW has only a 100msec granularity for t11_t12 so round it up
6708          * accordingly.
6709          */
6710         final->t11_t12 = roundup(final->t11_t12, 100 * 10);
6711 }
6712
6713 static void
6714 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6715                                               bool force_disable_vdd)
6716 {
6717         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6718         u32 pp_on, pp_off, port_sel = 0;
6719         int div = dev_priv->rawclk_freq / 1000;
6720         struct pps_registers regs;
6721         enum port port = dp_to_dig_port(intel_dp)->base.port;
6722         const struct edp_power_seq *seq = &intel_dp->pps_delays;
6723
6724         lockdep_assert_held(&dev_priv->pps_mutex);
6725
6726         intel_pps_get_registers(intel_dp, &regs);
6727
6728         /*
6729          * On some VLV machines the BIOS can leave the VDD
6730          * enabled even on power sequencers which aren't
6731          * hooked up to any port. This would mess up the
6732          * power domain tracking the first time we pick
6733          * one of these power sequencers for use since
6734          * edp_panel_vdd_on() would notice that the VDD was
6735          * already on and therefore wouldn't grab the power
6736          * domain reference. Disable VDD first to avoid this.
6737          * This also avoids spuriously turning the VDD on as
6738          * soon as the new power sequencer gets initialized.
6739          */
6740         if (force_disable_vdd) {
6741                 u32 pp = ironlake_get_pp_control(intel_dp);
6742
6743                 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6744
6745                 if (pp & EDP_FORCE_VDD)
6746                         DRM_DEBUG_KMS("VDD already on, disabling first\n");
6747
6748                 pp &= ~EDP_FORCE_VDD;
6749
6750                 I915_WRITE(regs.pp_ctrl, pp);
6751         }
6752
6753         pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6754                 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6755         pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6756                 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6757
6758         /* Haswell doesn't have any port selection bits for the panel
6759          * power sequencer any more. */
6760         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6761                 port_sel = PANEL_PORT_SELECT_VLV(port);
6762         } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6763                 switch (port) {
6764                 case PORT_A:
6765                         port_sel = PANEL_PORT_SELECT_DPA;
6766                         break;
6767                 case PORT_C:
6768                         port_sel = PANEL_PORT_SELECT_DPC;
6769                         break;
6770                 case PORT_D:
6771                         port_sel = PANEL_PORT_SELECT_DPD;
6772                         break;
6773                 default:
6774                         MISSING_CASE(port);
6775                         break;
6776                 }
6777         }
6778
6779         pp_on |= port_sel;
6780
6781         I915_WRITE(regs.pp_on, pp_on);
6782         I915_WRITE(regs.pp_off, pp_off);
6783
6784         /*
6785          * Compute the divisor for the pp clock, simply match the Bspec formula.
6786          */
6787         if (i915_mmio_reg_valid(regs.pp_div)) {
6788                 I915_WRITE(regs.pp_div,
6789                            REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6790                            REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
6791         } else {
6792                 u32 pp_ctl;
6793
6794                 pp_ctl = I915_READ(regs.pp_ctrl);
6795                 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6796                 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6797                 I915_WRITE(regs.pp_ctrl, pp_ctl);
6798         }
6799
6800         DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6801                       I915_READ(regs.pp_on),
6802                       I915_READ(regs.pp_off),
6803                       i915_mmio_reg_valid(regs.pp_div) ?
6804                       I915_READ(regs.pp_div) :
6805                       (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6806 }
6807
6808 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6809 {
6810         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6811
6812         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6813                 vlv_initial_power_sequencer_setup(intel_dp);
6814         } else {
6815                 intel_dp_init_panel_power_sequencer(intel_dp);
6816                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6817         }
6818 }
6819
6820 /**
6821  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6822  * @dev_priv: i915 device
6823  * @crtc_state: a pointer to the active intel_crtc_state
6824  * @refresh_rate: RR to be programmed
6825  *
6826  * This function gets called when refresh rate (RR) has to be changed from
6827  * one frequency to another. Switches can be between high and low RR
6828  * supported by the panel or to any other RR based on media playback (in
6829  * this case, RR value needs to be passed from user space).
6830  *
6831  * The caller of this function needs to take a lock on dev_priv->drrs.
6832  */
6833 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6834                                     const struct intel_crtc_state *crtc_state,
6835                                     int refresh_rate)
6836 {
6837         struct intel_encoder *encoder;
6838         struct intel_digital_port *dig_port = NULL;
6839         struct intel_dp *intel_dp = dev_priv->drrs.dp;
6840         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6841         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6842
6843         if (refresh_rate <= 0) {
6844                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6845                 return;
6846         }
6847
6848         if (intel_dp == NULL) {
6849                 DRM_DEBUG_KMS("DRRS not supported.\n");
6850                 return;
6851         }
6852
6853         dig_port = dp_to_dig_port(intel_dp);
6854         encoder = &dig_port->base;
6855
6856         if (!intel_crtc) {
6857                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6858                 return;
6859         }
6860
6861         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6862                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6863                 return;
6864         }
6865
6866         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6867                         refresh_rate)
6868                 index = DRRS_LOW_RR;
6869
6870         if (index == dev_priv->drrs.refresh_rate_type) {
6871                 DRM_DEBUG_KMS(
6872                         "DRRS requested for previously set RR...ignoring\n");
6873                 return;
6874         }
6875
6876         if (!crtc_state->base.active) {
6877                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6878                 return;
6879         }
6880
6881         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6882                 switch (index) {
6883                 case DRRS_HIGH_RR:
6884                         intel_dp_set_m_n(crtc_state, M1_N1);
6885                         break;
6886                 case DRRS_LOW_RR:
6887                         intel_dp_set_m_n(crtc_state, M2_N2);
6888                         break;
6889                 case DRRS_MAX_RR:
6890                 default:
6891                         DRM_ERROR("Unsupported refreshrate type\n");
6892                 }
6893         } else if (INTEL_GEN(dev_priv) > 6) {
6894                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6895                 u32 val;
6896
6897                 val = I915_READ(reg);
6898                 if (index > DRRS_HIGH_RR) {
6899                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6900                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6901                         else
6902                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6903                 } else {
6904                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6905                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6906                         else
6907                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6908                 }
6909                 I915_WRITE(reg, val);
6910         }
6911
6912         dev_priv->drrs.refresh_rate_type = index;
6913
6914         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6915 }
6916
/**
 * intel_edp_drrs_enable - Enable seamless DRRS
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes busy_frontbuffer_bits and registers @intel_dp as the
 * DRRS source.
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!crtc_state->has_drrs) {
                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
                return;
        }

        if (dev_priv->psr.enabled) {
                DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
                return;
        }

        mutex_lock(&dev_priv->drrs.mutex);
        if (dev_priv->drrs.dp) {
                DRM_DEBUG_KMS("DRRS already enabled\n");
                goto unlock;
        }

        dev_priv->drrs.busy_frontbuffer_bits = 0;

        dev_priv->drrs.dp = intel_dp;

unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!old_crtc_state->has_drrs)
                return;

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, old_crtc_state,
                        intel_dp->attached_connector->panel.fixed_mode->vrefresh);

        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);

        cancel_delayed_work_sync(&dev_priv->drrs.work);
}
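
/*
 * Illustrative usage (a sketch, not driver code): a hypothetical encoder
 * enable path would call intel_edp_drrs_enable() once the pipe is running,
 * and the matching disable path would call intel_edp_drrs_disable() before
 * tearing the pipe down. Everything except the two intel_edp_drrs_*()
 * calls is made up for the example:
 *
 *	static void example_post_enable(struct intel_dp *intel_dp,
 *					const struct intel_crtc_state *crtc_state)
 *	{
 *		intel_edp_drrs_enable(intel_dp, crtc_state);
 *	}
 *
 *	static void example_pre_disable(struct intel_dp *intel_dp,
 *					const struct intel_crtc_state *old_crtc_state)
 *	{
 *		intel_edp_drrs_disable(intel_dp, old_crtc_state);
 *	}
 */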

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), drrs.work.work);
        struct intel_dp *intel_dp;

        mutex_lock(&dev_priv->drrs.mutex);

        intel_dp = dev_priv->drrs.dp;

        if (!intel_dp)
                goto unlock;

        /*
         * The delayed work can race with an invalidate, hence we need to
         * recheck.
         */

        if (dev_priv->drrs.busy_frontbuffer_bits)
                goto unlock;

        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
                struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                        intel_dp->attached_connector->panel.downclock_mode->vrefresh);
        }

unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                        dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

        mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits)
{
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
                                dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

        /*
         * flush also means no more activity hence schedule downclock, if all
         * other fbs are quiescent too
         */
        if (!dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
}
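
/*
 * Illustrative call sequence (a sketch, not driver code): the frontbuffer
 * tracking code is expected to pair the two hooks above around rendering.
 * Only the intel_edp_drrs_*() names and the 1 second downclock timeout come
 * from this file; the rest is an example:
 *
 *	intel_edp_drrs_invalidate(dev_priv, bits); // drawing starts -> HIGH_RR
 *	// ...rendering...
 *	intel_edp_drrs_flush(dev_priv, bits);      // frame reaches the screen
 *	// after ~1s with no further activity, intel_edp_drrs_downclock_work()
 *	// switches the panel back to LOW_RR
 */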

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there
 * is no movement on screen, after a timeout of 1 second, a switch to low RR
 * is made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
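
/*
 * Example (a sketch): a DRRS-capable eDP panel's EDID might expose two
 * detailed modes that differ only in refresh rate, e.g.:
 *
 *	1920x1080 @ 60 Hz  ->  fixed_mode      (DRRS_HIGH_RR)
 *	1920x1080 @ 40 Hz  ->  downclock_mode  (DRRS_LOW_RR)
 *
 * intel_dp_drrs_init() below pairs such a downclock mode with the fixed
 * mode, and intel_dp_set_drrs_state() switches between the two by
 * reprogramming the link M/N values or the PIPECONF refresh rate switch
 * bit, depending on the platform.
 */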

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
                   struct drm_display_mode *fixed_mode)
{
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct drm_display_mode *downclock_mode = NULL;

        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
        mutex_init(&dev_priv->drrs.mutex);

        if (INTEL_GEN(dev_priv) <= 6) {
                DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
                return NULL;
        }

        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }

        downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
        if (!downclock_mode) {
                DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
                return NULL;
        }

        dev_priv->drrs.type = dev_priv->vbt.drrs_type;

        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector = &intel_connector->base;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        enum pipe pipe = INVALID_PIPE;
        intel_wakeref_t wakeref;
        struct edid *edid;

        if (!intel_dp_is_edp(intel_dp))
                return true;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
         * eDP and LVDS, bail out early in this case to prevent interfering
         * with an already powered-on LVDS power sequencer.
         */
        if (intel_get_lvds_encoder(dev_priv)) {
                WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
                DRM_INFO("LVDS was detected, not registering eDP\n");

                return false;
        }

        with_pps_lock(intel_dp, wakeref) {
                intel_dp_init_panel_power_timestamps(intel_dp);
                intel_dp_pps_init(intel_dp);
                intel_edp_panel_vdd_sanitize(intel_dp);
        }

        /* Cache DPCD and EDID for eDP. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);

        if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
        }

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_connector_update_edid_property(connector, edid);
                } else {
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
        if (fixed_mode)
                downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

        /* fallback to VBT if available for eDP */
        if (!fixed_mode)
                fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if
                 * that fails just assume pipe A.
                 */
                pipe = vlv_active_pipe(intel_dp);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        if (fixed_mode)
                drm_connector_init_panel_orientation_property(
                        connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

        return true;

out_vdd_off:
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        with_pps_lock(intel_dp, wakeref)
                edp_panel_vdd_off_sync(intel_dp);

        return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
        struct intel_connector *intel_connector;
        struct drm_connector *connector;

        intel_connector = container_of(work, typeof(*intel_connector),
                                       modeset_retry_work);
        connector = &intel_connector->base;
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
                      connector->name);

        /* Grab the locks before changing connector property */
        mutex_lock(&connector->dev->mode_config.mutex);
        /*
         * Set connector link status to BAD and send a Uevent to notify
         * userspace to do a modeset.
         */
        drm_connector_set_link_status_property(connector,
                                               DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_hotplug_event(connector->dev);
}
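
/*
 * Userspace side of the retry protocol, as a sketch (libdrm-based, not part
 * of the driver): on the hotplug uevent sent above, a compositor would
 * reprobe, check the standard "link-status" connector property, and redo
 * the modeset if it reads back BAD, e.g.:
 *
 *	drmModeObjectPropertiesPtr props =
 *		drmModeObjectGetProperties(fd, connector_id,
 *					   DRM_MODE_OBJECT_CONNECTOR);
 *
 *	for (uint32_t i = 0; i < props->count_props; i++) {
 *		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);
 *
 *		if (!strcmp(prop->name, "link-status") &&
 *		    props->prop_values[i] == DRM_MODE_LINK_STATUS_BAD) {
 *			// re-commit the current or a lower mode here; the
 *			// kernel then retrains the link with the new params
 *		}
 *		drmModeFreeProperty(prop);
 *	}
 *	drmModeFreeObjectProperties(props);
 */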

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
        int type;

        /* Initialize the work for modeset in case of link train failure */
        INIT_WORK(&intel_connector->modeset_retry_work,
                  intel_dp_modeset_retry_work_fn);

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp_set_source_rates(intel_dp);

        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_port_edp(dev_priv, port)) {
                /*
                 * Currently we don't support eDP on TypeC ports, although in
                 * theory it could work on TypeC legacy ports.
                 */
                WARN_ON(intel_port_is_tc(dev_priv, port));
                type = DRM_MODE_CONNECTOR_eDP;
        } else {
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
                    intel_dp_is_edp(intel_dp) &&
                    port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                      port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        if (!HAS_GMCH(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        if (INTEL_GEN(dev_priv) >= 11)
                connector->ycbcr_420_allowed = true;

        intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

        intel_dp_aux_init(intel_dp);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C ||
             port == PORT_D || port == PORT_F))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
                int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);

                if (ret)
                        DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
        }

        /*
         * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);

                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                             &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
                             "DP %c", port_name(port)))
                goto err_encoder_init;

        intel_encoder->hotplug = intel_dp_hotplug;
        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->update_pipe = intel_panel_update_backlight;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
                intel_encoder->post_disable = g4x_post_disable_dp;
        }

        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
                else
                        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        } else {
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);

        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);

                if (!intel_dp->can_mst)
                        continue;

                if (intel_dp->is_mst)
                        drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;
                int ret;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);

                if (!intel_dp->can_mst)
                        continue;

                ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
                if (ret) {
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        false);
                }
        }
}