/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we list only the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
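
/*
 * Worked example of the fixed-point m2 encoding used above: for
 * DP_LINK_BW_1_62, m2_int = 32 and m2_fraction = 1677722, so
 *
 *   (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a
 *
 * which is the .m2 value in the first table entry; likewise
 * (27 << 22) | 0 == 0x6c00000 for the 2.7 and 5.4 GHz entries.
 */
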
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
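
/*
 * Continuing the example above: 1680x1050R at 18bpp needs
 *
 *   intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 *
 * decakilobits, while a single 2.7GHz lane provides
 *
 *   intel_dp_max_data_rate(270000, 1) == 216000
 *
 * decakilobits, so that mode just fits on one lane.
 */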

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
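
/*
 * Illustration of the helpers above: the AUX data registers hold up to
 * four bytes in big-endian order, so packing the two bytes 0x12, 0x34
 * gives
 *
 *   intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34 }, 2) == 0x12340000
 *
 * and intel_dp_unpack_aux(0x12340000, dst, 2) writes 0x12 and 0x34 back
 * into dst[0] and dst[1].
 */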

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   Only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
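
/*
 * intersect_rates() assumes both rate lists are sorted ascending, so the
 * merge walks each list once.  For example, intersecting the source
 * default_rates { 162000, 270000, 540000 } with a sink reporting
 * { 162000, 270000 } yields common_rates = { 162000, 270000 } and
 * returns 2.
 */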

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
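
/*
 * Note on intel_dp_max_link_rate(): rates[] is zero-initialized, so
 * rate_to_index(0, rates) returns the index of the first unused slot,
 * i.e. the number of common rates, and the entry just before it is
 * therefore the highest common rate, the array being sorted ascending.
 */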

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
1475
7c62a164 1476static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1477{
7c62a164
DV
1478 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1479 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1480 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1481 struct drm_i915_private *dev_priv = dev->dev_private;
1482 u32 dpa_ctl;
1483
6e3c9717
ACO
1484 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1485 crtc->config->port_clock);
ea9b6006
DV
1486 dpa_ctl = I915_READ(DP_A);
1487 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1488
6e3c9717 1489 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1490 /* For a long time we've carried around a ILK-DevA w/a for the
1491 * 160MHz clock. If we're really unlucky, it's still required.
1492 */
1493 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1494 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1495 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1496 } else {
1497 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1498 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1499 }
1ce17038 1500
ea9b6006
DV
1501 I915_WRITE(DP_A, dpa_ctl);
1502
1503 POSTING_READ(DP_A);
1504 udelay(500);
1505}
1506
8ac33ed3 1507static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1508{
b934223d 1509 struct drm_device *dev = encoder->base.dev;
417e822d 1510 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1512 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1513 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1514 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1515
417e822d 1516 /*
1a2eb460 1517 * There are four kinds of DP registers:
417e822d
KP
1518 *
1519 * IBX PCH
1a2eb460
KP
1520 * SNB CPU
1521 * IVB CPU
417e822d
KP
1522 * CPT PCH
1523 *
1524 * IBX PCH and CPU are the same for almost everything,
1525 * except that the CPU DP PLL is configured in this
1526 * register
1527 *
1528 * CPT PCH is quite different, having many bits moved
1529 * to the TRANS_DP_CTL register instead. That
1530 * configuration happens (oddly) in ironlake_pch_enable
1531 */
9c9e7927 1532
417e822d
KP
1533 /* Preserve the BIOS-computed detected bit. This is
1534 * supposed to be read-only.
1535 */
1536 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1537
417e822d 1538 /* Handle DP bits in common between all three register formats */
417e822d 1539 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1540 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1541
6e3c9717 1542 if (crtc->config->has_audio)
ea5b213a 1543 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1544
417e822d 1545 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1546
bc7d38a4 1547 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1549 intel_dp->DP |= DP_SYNC_HS_HIGH;
1550 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1551 intel_dp->DP |= DP_SYNC_VS_HIGH;
1552 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1553
6aba5b6c 1554 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1555 intel_dp->DP |= DP_ENHANCED_FRAMING;
1556
7c62a164 1557 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1558 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1559 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1560 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1561
1562 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1563 intel_dp->DP |= DP_SYNC_HS_HIGH;
1564 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1565 intel_dp->DP |= DP_SYNC_VS_HIGH;
1566 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1567
6aba5b6c 1568 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1569 intel_dp->DP |= DP_ENHANCED_FRAMING;
1570
44f37d1f
CML
1571 if (!IS_CHERRYVIEW(dev)) {
1572 if (crtc->pipe == 1)
1573 intel_dp->DP |= DP_PIPEB_SELECT;
1574 } else {
1575 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1576 }
417e822d
KP
1577 } else {
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1579 }
a4fc5ed6
KP
1580}
1581
ffd6749d
PZ
1582#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1583#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1584
1a5ef5b7
PZ
1585#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1586#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1587
ffd6749d
PZ
1588#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1589#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1590
4be73780 1591static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1592 u32 mask,
1593 u32 value)
bd943159 1594{
30add22d 1595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1596 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1597 u32 pp_stat_reg, pp_ctrl_reg;
1598
e39b999a
VS
1599 lockdep_assert_held(&dev_priv->pps_mutex);
1600
bf13e81b
JN
1601 pp_stat_reg = _pp_stat_reg(intel_dp);
1602 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1603
99ea7127 1604 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1605 mask, value,
1606 I915_READ(pp_stat_reg),
1607 I915_READ(pp_ctrl_reg));
32ce697c 1608
453c5420 1609 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1610 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1611 I915_READ(pp_stat_reg),
1612 I915_READ(pp_ctrl_reg));
32ce697c 1613 }
54c136d4
CW
1614
1615 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1616}
32ce697c 1617
4be73780 1618static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1619{
1620 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1621 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1622}
1623
4be73780 1624static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1625{
1626 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1627 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1628}
1629
4be73780 1630static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1631{
1632 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1633
1634 /* When we disable the VDD override bit last we have to do the manual
1635 * wait. */
1636 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1637 intel_dp->panel_power_cycle_delay);
1638
4be73780 1639 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1640}
1641
4be73780 1642static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1643{
1644 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1645 intel_dp->backlight_on_delay);
1646}
1647
4be73780 1648static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1649{
1650 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1651 intel_dp->backlight_off_delay);
1652}
99ea7127 1653
832dd3c1
KP
1654/* Read the current pp_control value, unlocking the register if it
1655 * is locked
1656 */
1657
453c5420 1658static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1659{
453c5420
JB
1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662 u32 control;
832dd3c1 1663
e39b999a
VS
1664 lockdep_assert_held(&dev_priv->pps_mutex);
1665
bf13e81b 1666 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1667 control &= ~PANEL_UNLOCK_MASK;
1668 control |= PANEL_UNLOCK_REGS;
1669 return control;
bd943159
KP
1670}
1671
951468f3
VS
1672/*
1673 * Must be paired with edp_panel_vdd_off().
1674 * Must hold pps_mutex around the whole on/off sequence.
1675 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 */
1e0560e0 1677static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1678{
30add22d 1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1682 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1683 enum intel_display_power_domain power_domain;
5d613501 1684 u32 pp;
453c5420 1685 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1686 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1687
e39b999a
VS
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
97af61f5 1690 if (!is_edp(intel_dp))
adddaaf4 1691 return false;
bd943159 1692
2c623c11 1693 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1694 intel_dp->want_panel_vdd = true;
99ea7127 1695
4be73780 1696 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1697 return need_to_disable;
b0665d57 1698
4e6e1a54
ID
1699 power_domain = intel_display_port_power_domain(intel_encoder);
1700 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1701
3936fcf4
VS
1702 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703 port_name(intel_dig_port->port));
bd943159 1704
4be73780
DV
1705 if (!edp_have_panel_power(intel_dp))
1706 wait_panel_power_cycle(intel_dp);
99ea7127 1707
453c5420 1708 pp = ironlake_get_pp_control(intel_dp);
5d613501 1709 pp |= EDP_FORCE_VDD;
ebf33b18 1710
bf13e81b
JN
1711 pp_stat_reg = _pp_stat_reg(intel_dp);
1712 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1713
1714 I915_WRITE(pp_ctrl_reg, pp);
1715 POSTING_READ(pp_ctrl_reg);
1716 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1718 /*
1719 * If the panel wasn't on, delay before accessing aux channel
1720 */
4be73780 1721 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1722 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723 port_name(intel_dig_port->port));
f01eca2e 1724 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1725 }
adddaaf4
JN
1726
1727 return need_to_disable;
1728}
1729
951468f3
VS
1730/*
1731 * Must be paired with intel_edp_panel_vdd_off() or
1732 * intel_edp_panel_off().
1733 * Nested calls to these functions are not allowed since
1734 * we drop the lock. Caller must use some higher level
1735 * locking to prevent nested calls from other threads.
1736 */
b80d6c78 1737void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1738{
c695b6b6 1739 bool vdd;
adddaaf4 1740
c695b6b6
VS
1741 if (!is_edp(intel_dp))
1742 return;
1743
773538e8 1744 pps_lock(intel_dp);
c695b6b6 1745 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1746 pps_unlock(intel_dp);
c695b6b6 1747
e2c719b7 1748 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1749 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1750}
1751
4be73780 1752static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1753{
30add22d 1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1755 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1756 struct intel_digital_port *intel_dig_port =
1757 dp_to_dig_port(intel_dp);
1758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1759 enum intel_display_power_domain power_domain;
5d613501 1760 u32 pp;
453c5420 1761 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1762
e39b999a 1763 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1764
15e899a0 1765 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1766
15e899a0 1767 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1768 return;
b0665d57 1769
3936fcf4
VS
1770 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1771 port_name(intel_dig_port->port));
bd943159 1772
be2c9196
VS
1773 pp = ironlake_get_pp_control(intel_dp);
1774 pp &= ~EDP_FORCE_VDD;
453c5420 1775
be2c9196
VS
1776 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1778
be2c9196
VS
1779 I915_WRITE(pp_ctrl_reg, pp);
1780 POSTING_READ(pp_ctrl_reg);
90791a5c 1781
be2c9196
VS
1782 /* Make sure sequencer is idle before allowing subsequent activity */
1783 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1784 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1785
be2c9196
VS
1786 if ((pp & POWER_TARGET_ON) == 0)
1787 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1788
be2c9196
VS
1789 power_domain = intel_display_port_power_domain(intel_encoder);
1790 intel_display_power_put(dev_priv, power_domain);
bd943159 1791}
5d613501 1792
4be73780 1793static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1794{
1795 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1796 struct intel_dp, panel_vdd_work);
bd943159 1797
773538e8 1798 pps_lock(intel_dp);
15e899a0
VS
1799 if (!intel_dp->want_panel_vdd)
1800 edp_panel_vdd_off_sync(intel_dp);
773538e8 1801 pps_unlock(intel_dp);
bd943159
KP
1802}
1803
aba86890
ID
1804static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805{
1806 unsigned long delay;
1807
1808 /*
1809 * Queue the timer to fire a long time from now (relative to the power
1810 * down delay) to keep the panel power up across a sequence of
1811 * operations.
1812 */
1813 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815}
1816
951468f3
VS
1817/*
1818 * Must be paired with edp_panel_vdd_on().
1819 * Must hold pps_mutex around the whole on/off sequence.
1820 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 */
4be73780 1822static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1823{
e39b999a
VS
1824 struct drm_i915_private *dev_priv =
1825 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
97af61f5
KP
1829 if (!is_edp(intel_dp))
1830 return;
5d613501 1831
e2c719b7 1832 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1833 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1834
bd943159
KP
1835 intel_dp->want_panel_vdd = false;
1836
aba86890 1837 if (sync)
4be73780 1838 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1839 else
1840 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1841}
1842
9f0fb5be 1843static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1844{
30add22d 1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1846 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1847 u32 pp;
453c5420 1848 u32 pp_ctrl_reg;
9934c132 1849
9f0fb5be
VS
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
97af61f5 1852 if (!is_edp(intel_dp))
bd943159 1853 return;
99ea7127 1854
3936fcf4
VS
1855 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1856 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1857
e7a89ace
VS
1858 if (WARN(edp_have_panel_power(intel_dp),
1859 "eDP port %c panel power already on\n",
1860 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1861 return;
9934c132 1862
4be73780 1863 wait_panel_power_cycle(intel_dp);
37c6c9b0 1864
bf13e81b 1865 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1866 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1867 if (IS_GEN5(dev)) {
1868 /* ILK workaround: disable reset around power sequence */
1869 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
05ce1a49 1872 }
37c6c9b0 1873
1c0ae80a 1874 pp |= POWER_TARGET_ON;
99ea7127
KP
1875 if (!IS_GEN5(dev))
1876 pp |= PANEL_POWER_RESET;
1877
453c5420
JB
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
9934c132 1880
4be73780 1881 wait_panel_on(intel_dp);
dce56b3c 1882 intel_dp->last_power_on = jiffies;
9934c132 1883
05ce1a49
KP
1884 if (IS_GEN5(dev)) {
1885 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1886 I915_WRITE(pp_ctrl_reg, pp);
1887 POSTING_READ(pp_ctrl_reg);
05ce1a49 1888 }
9f0fb5be 1889}
e39b999a 1890
9f0fb5be
VS
1891void intel_edp_panel_on(struct intel_dp *intel_dp)
1892{
1893 if (!is_edp(intel_dp))
1894 return;
1895
1896 pps_lock(intel_dp);
1897 edp_panel_on(intel_dp);
773538e8 1898 pps_unlock(intel_dp);
9934c132
JB
1899}
1900
9f0fb5be
VS
1901
1902static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1903{
4e6e1a54
ID
1904 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1905 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1906 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1907 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1908 enum intel_display_power_domain power_domain;
99ea7127 1909 u32 pp;
453c5420 1910 u32 pp_ctrl_reg;
9934c132 1911
9f0fb5be
VS
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
97af61f5
KP
1914 if (!is_edp(intel_dp))
1915 return;
37c6c9b0 1916
3936fcf4
VS
1917 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1918 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1919
3936fcf4
VS
1920 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1921 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1922
453c5420 1923 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 1924 /* We need to switch off panel power _and_ force vdd, because otherwise some
1925 * panels get very unhappy and cease to work. */
b3064154
PJ
1926 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1927 EDP_BLC_ENABLE);
453c5420 1928
bf13e81b 1929 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1930
849e39f5
PZ
1931 intel_dp->want_panel_vdd = false;
1932
453c5420
JB
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
9934c132 1935
dce56b3c 1936 intel_dp->last_power_cycle = jiffies;
4be73780 1937 wait_panel_off(intel_dp);
849e39f5
PZ
1938
1939 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1940 power_domain = intel_display_port_power_domain(intel_encoder);
1941 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1942}
e39b999a 1943
9f0fb5be
VS
1944void intel_edp_panel_off(struct intel_dp *intel_dp)
1945{
1946 if (!is_edp(intel_dp))
1947 return;
e39b999a 1948
9f0fb5be
VS
1949 pps_lock(intel_dp);
1950 edp_panel_off(intel_dp);
773538e8 1951 pps_unlock(intel_dp);
9934c132
JB
1952}
1953
1250d107
JN
1954/* Enable backlight in the panel power control. */
1955static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1956{
da63a9f2
PZ
1957 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 pp;
453c5420 1961 u32 pp_ctrl_reg;
32f9d658 1962
01cb9ea6
JB
1963 /*
1964 * If we enable the backlight right away following a panel power
1965 * on, we may see slight flicker as the panel syncs with the eDP
1966 * link. So delay a bit to make sure the image is solid before
1967 * allowing it to appear.
1968 */
4be73780 1969 wait_backlight_on(intel_dp);
e39b999a 1970
773538e8 1971 pps_lock(intel_dp);
e39b999a 1972
453c5420 1973 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1974 pp |= EDP_BLC_ENABLE;
453c5420 1975
bf13e81b 1976 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
e39b999a 1980
773538e8 1981 pps_unlock(intel_dp);
32f9d658
ZW
1982}
1983
1250d107
JN
1984/* Enable backlight PWM and backlight PP control. */
1985void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986{
1987 if (!is_edp(intel_dp))
1988 return;
1989
1990 DRM_DEBUG_KMS("\n");
1991
1992 intel_panel_enable_backlight(intel_dp->attached_connector);
1993 _intel_edp_backlight_on(intel_dp);
1994}
1995
1996/* Disable backlight in the panel power control. */
1997static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1998{
30add22d 1999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 pp;
453c5420 2002 u32 pp_ctrl_reg;
32f9d658 2003
f01eca2e
KP
2004 if (!is_edp(intel_dp))
2005 return;
2006
773538e8 2007 pps_lock(intel_dp);
e39b999a 2008
453c5420 2009 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2010 pp &= ~EDP_BLC_ENABLE;
453c5420 2011
bf13e81b 2012 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2013
2014 I915_WRITE(pp_ctrl_reg, pp);
2015 POSTING_READ(pp_ctrl_reg);
f7d2323c 2016
773538e8 2017 pps_unlock(intel_dp);
e39b999a
VS
2018
2019 intel_dp->last_backlight_off = jiffies;
f7d2323c 2020 edp_wait_backlight_off(intel_dp);
1250d107 2021}
f7d2323c 2022
1250d107
JN
2023/* Disable backlight PP control and backlight PWM. */
2024void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025{
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 DRM_DEBUG_KMS("\n");
f7d2323c 2030
1250d107 2031 _intel_edp_backlight_off(intel_dp);
f7d2323c 2032 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2033}
a4fc5ed6 2034
73580fb7
JN
2035/*
2036 * Hook for controlling the panel power control backlight through the bl_power
2037 * sysfs attribute. Take care to handle multiple calls.
2038 */
2039static void intel_edp_backlight_power(struct intel_connector *connector,
2040 bool enable)
2041{
2042 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2043 bool is_enabled;
2044
773538e8 2045 pps_lock(intel_dp);
e39b999a 2046 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2047 pps_unlock(intel_dp);
73580fb7
JN
2048
2049 if (is_enabled == enable)
2050 return;
2051
23ba9373
JN
2052 DRM_DEBUG_KMS("panel power control backlight %s\n",
2053 enable ? "enable" : "disable");
73580fb7
JN
2054
2055 if (enable)
2056 _intel_edp_backlight_on(intel_dp);
2057 else
2058 _intel_edp_backlight_off(intel_dp);
2059}
2060
2bd2ad64 2061static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2062{
da63a9f2
PZ
2063 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2065 struct drm_device *dev = crtc->dev;
d240f20f
JB
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2067 u32 dpa_ctl;
2068
2bd2ad64
DV
2069 assert_pipe_disabled(dev_priv,
2070 to_intel_crtc(crtc)->pipe);
2071
d240f20f
JB
2072 DRM_DEBUG_KMS("\n");
2073 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2074 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2075 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076
2077 /* We don't adjust intel_dp->DP while tearing down the link, to
2078 * facilitate link retraining (e.g. after hotplug). Hence clear all
2079 * enable bits here to ensure that we don't enable too much. */
2080 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2081 intel_dp->DP |= DP_PLL_ENABLE;
2082 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2083 POSTING_READ(DP_A);
2084 udelay(200);
d240f20f
JB
2085}
2086
2bd2ad64 2087static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2088{
da63a9f2
PZ
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
d240f20f
JB
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2bd2ad64
DV
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
d240f20f 2098 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2099 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2100 "dp pll off, should be on\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We can't rely on the value tracked for the DP register in
2104 * intel_dp->DP because link_down must not change that (otherwise link
 2105 * re-training will fail). */
298b0b39 2106 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2107 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2108 POSTING_READ(DP_A);
d240f20f
JB
2109 udelay(200);
2110}
2111
c7ad3810 2112/* If the sink supports it, try to set the power state appropriately */
c19b0669 2113void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2114{
2115 int ret, i;
2116
2117 /* Should have a valid DPCD by this point */
2118 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119 return;
2120
2121 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2122 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123 DP_SET_POWER_D3);
c7ad3810
JB
2124 } else {
2125 /*
2126 * When turning on, we need to retry for 1ms to give the sink
2127 * time to wake up.
2128 */
2129 for (i = 0; i < 3; i++) {
9d1a1031
JN
2130 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131 DP_SET_POWER_D0);
c7ad3810
JB
2132 if (ret == 1)
2133 break;
2134 msleep(1);
2135 }
2136 }
f9cac721
JN
2137
2138 if (ret != 1)
2139 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2141}
2142
19d8fe15
DV
2143static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2144 enum pipe *pipe)
d240f20f 2145{
19d8fe15 2146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2147 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2148 struct drm_device *dev = encoder->base.dev;
2149 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2150 enum intel_display_power_domain power_domain;
2151 u32 tmp;
2152
2153 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2154 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2155 return false;
2156
2157 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2158
2159 if (!(tmp & DP_PORT_EN))
2160 return false;
2161
bc7d38a4 2162 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2163 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2164 } else if (IS_CHERRYVIEW(dev)) {
2165 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2166 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2167 *pipe = PORT_TO_PIPE(tmp);
2168 } else {
2169 u32 trans_sel;
2170 u32 trans_dp;
2171 int i;
2172
2173 switch (intel_dp->output_reg) {
2174 case PCH_DP_B:
2175 trans_sel = TRANS_DP_PORT_SEL_B;
2176 break;
2177 case PCH_DP_C:
2178 trans_sel = TRANS_DP_PORT_SEL_C;
2179 break;
2180 case PCH_DP_D:
2181 trans_sel = TRANS_DP_PORT_SEL_D;
2182 break;
2183 default:
2184 return true;
2185 }
2186
055e393f 2187 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2188 trans_dp = I915_READ(TRANS_DP_CTL(i));
2189 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2190 *pipe = i;
2191 return true;
2192 }
2193 }
19d8fe15 2194
4a0833ec
DV
2195 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2196 intel_dp->output_reg);
2197 }
d240f20f 2198
19d8fe15
DV
2199 return true;
2200}
d240f20f 2201
045ac3b5 2202static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2203 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2204{
2205 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2206 u32 tmp, flags = 0;
63000ef6
XZ
2207 struct drm_device *dev = encoder->base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 enum port port = dp_to_dig_port(intel_dp)->port;
2210 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2211 int dotclock;
045ac3b5 2212
9ed109a7 2213 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2214
2215 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2216
63000ef6 2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2218 if (tmp & DP_SYNC_HS_HIGH)
2219 flags |= DRM_MODE_FLAG_PHSYNC;
2220 else
2221 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2222
63000ef6
XZ
2223 if (tmp & DP_SYNC_VS_HIGH)
2224 flags |= DRM_MODE_FLAG_PVSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NVSYNC;
2227 } else {
2228 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2229 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2230 flags |= DRM_MODE_FLAG_PHSYNC;
2231 else
2232 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2233
63000ef6
XZ
2234 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2235 flags |= DRM_MODE_FLAG_PVSYNC;
2236 else
2237 flags |= DRM_MODE_FLAG_NVSYNC;
2238 }
045ac3b5 2239
2d112de7 2240 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2241
8c875fca
VS
2242 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2243 tmp & DP_COLOR_RANGE_16_235)
2244 pipe_config->limited_color_range = true;
2245
eb14cb74
VS
2246 pipe_config->has_dp_encoder = true;
2247
2248 intel_dp_get_m_n(crtc, pipe_config);
2249
18442d08 2250 if (port == PORT_A) {
f1f644dc
JB
2251 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2252 pipe_config->port_clock = 162000;
2253 else
2254 pipe_config->port_clock = 270000;
2255 }
18442d08
VS
2256
2257 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2258 &pipe_config->dp_m_n);
2259
2260 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2261 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262
2d112de7 2263 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2264
c6cd2ee2
JN
2265 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2266 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 /*
2268 * This is a big fat ugly hack.
2269 *
2270 * Some machines in UEFI boot mode provide us a VBT that has 18
2271 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2272 * unknown we fail to light up. Yet the same BIOS boots up with
2273 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2274 * max, not what it tells us to use.
2275 *
2276 * Note: This will still be broken if the eDP panel is not lit
2277 * up by the BIOS, and thus we can't get the mode at module
2278 * load.
2279 */
2280 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2281 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2282 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2283 }
045ac3b5
JB
2284}
2285
e8cb4558 2286static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2287{
e8cb4558 2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2289 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2290 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291
6e3c9717 2292 if (crtc->config->has_audio)
495a5bb8 2293 intel_audio_codec_disable(encoder);
6cb49835 2294
b32c6f48
RV
2295 if (HAS_PSR(dev) && !HAS_DDI(dev))
2296 intel_psr_disable(intel_dp);
2297
6cb49835
DV
2298 /* Make sure the panel is off before trying to change the mode. But also
2299 * ensure that we have vdd while we switch off the panel. */
24f3e092 2300 intel_edp_panel_vdd_on(intel_dp);
4be73780 2301 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2302 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2303 intel_edp_panel_off(intel_dp);
3739850b 2304
08aff3fe
VS
2305 /* disable the port before the pipe on g4x */
2306 if (INTEL_INFO(dev)->gen < 5)
3739850b 2307 intel_dp_link_down(intel_dp);
d240f20f
JB
2308}
2309
08aff3fe 2310static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2311{
2bd2ad64 2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2313 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2314
49277c31 2315 intel_dp_link_down(intel_dp);
08aff3fe
VS
2316 if (port == PORT_A)
2317 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2318}
2319
2320static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321{
2322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323
2324 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2325}
2326
580d3811
VS
2327static void chv_post_disable_dp(struct intel_encoder *encoder)
2328{
2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2330 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2331 struct drm_device *dev = encoder->base.dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc =
2334 to_intel_crtc(encoder->base.crtc);
2335 enum dpio_channel ch = vlv_dport_to_channel(dport);
2336 enum pipe pipe = intel_crtc->pipe;
2337 u32 val;
2338
2339 intel_dp_link_down(intel_dp);
2340
2341 mutex_lock(&dev_priv->dpio_lock);
2342
2343 /* Propagate soft reset to data lane reset */
97fd4d5c 2344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2345 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2346 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2347
97fd4d5c
VS
2348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2349 val |= CHV_PCS_REQ_SOFTRESET_EN;
2350 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351
2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2353 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2357 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2359
2360 mutex_unlock(&dev_priv->dpio_lock);
2361}
2362
7b13b58a
VS
2363static void
2364_intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint32_t *DP,
2366 uint8_t dp_train_pat)
2367{
2368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2369 struct drm_device *dev = intel_dig_port->base.base.dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 enum port port = intel_dig_port->port;
2372
2373 if (HAS_DDI(dev)) {
2374 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375
2376 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2377 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 else
2379 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380
2381 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2382 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2383 case DP_TRAINING_PATTERN_DISABLE:
2384 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2385
2386 break;
2387 case DP_TRAINING_PATTERN_1:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 break;
2390 case DP_TRAINING_PATTERN_2:
2391 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 break;
2393 case DP_TRAINING_PATTERN_3:
2394 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2395 break;
2396 }
2397 I915_WRITE(DP_TP_CTL(port), temp);
2398
2399 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2400 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401
2402 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2403 case DP_TRAINING_PATTERN_DISABLE:
2404 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 DRM_ERROR("DP training pattern 3 not supported\n");
2414 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2415 break;
2416 }
2417
2418 } else {
2419 if (IS_CHERRYVIEW(dev))
2420 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 else
2422 *DP &= ~DP_LINK_TRAIN_MASK;
2423
2424 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2425 case DP_TRAINING_PATTERN_DISABLE:
2426 *DP |= DP_LINK_TRAIN_OFF;
2427 break;
2428 case DP_TRAINING_PATTERN_1:
2429 *DP |= DP_LINK_TRAIN_PAT_1;
2430 break;
2431 case DP_TRAINING_PATTERN_2:
2432 *DP |= DP_LINK_TRAIN_PAT_2;
2433 break;
2434 case DP_TRAINING_PATTERN_3:
2435 if (IS_CHERRYVIEW(dev)) {
2436 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 } else {
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2;
2440 }
2441 break;
2442 }
2443 }
2444}
2445
2446static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447{
2448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450
7b13b58a
VS
2451 /* enable with pattern 1 (as per spec) */
2452 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2453 DP_TRAINING_PATTERN_1);
2454
2455 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2456 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2457
2458 /*
2459 * Magic for VLV/CHV. We _must_ first set up the register
2460 * without actually enabling the port, and then do another
2461 * write to enable the port. Otherwise link training will
2462 * fail when the power sequencer is freshly used for this port.
2463 */
2464 intel_dp->DP |= DP_PORT_EN;
2465
2466 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2467 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2468}
2469
e8cb4558 2470static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2471{
e8cb4558
DV
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2475 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2476 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2477
0c33d8d7
DV
2478 if (WARN_ON(dp_reg & DP_PORT_EN))
2479 return;
5d613501 2480
093e3f13
VS
2481 pps_lock(intel_dp);
2482
2483 if (IS_VALLEYVIEW(dev))
2484 vlv_init_panel_power_sequencer(intel_dp);
2485
7b13b58a 2486 intel_dp_enable_port(intel_dp);
093e3f13
VS
2487
2488 edp_panel_vdd_on(intel_dp);
2489 edp_panel_on(intel_dp);
2490 edp_panel_vdd_off(intel_dp, true);
2491
2492 pps_unlock(intel_dp);
2493
61234fa5
VS
2494 if (IS_VALLEYVIEW(dev))
2495 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496
f01eca2e 2497 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2498 intel_dp_start_link_train(intel_dp);
33a34e4e 2499 intel_dp_complete_link_train(intel_dp);
3ab9c637 2500 intel_dp_stop_link_train(intel_dp);
c1dec79a 2501
6e3c9717 2502 if (crtc->config->has_audio) {
c1dec79a
JN
2503 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2504 pipe_name(crtc->pipe));
2505 intel_audio_codec_enable(encoder);
2506 }
ab1f90f9 2507}
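/*
 * Note on ordering: the port register is written first (with training
 * pattern 1), panel VDD/power are then brought up under pps_mutex, link
 * training runs next, and audio is enabled last, only when the crtc
 * state asks for it.
 */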
89b667f8 2508
ecff4f3b
JN
2509static void g4x_enable_dp(struct intel_encoder *encoder)
2510{
828f5c6e
JN
2511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
ecff4f3b 2513 intel_enable_dp(encoder);
4be73780 2514 intel_edp_backlight_on(intel_dp);
ab1f90f9 2515}
89b667f8 2516
ab1f90f9
JN
2517static void vlv_enable_dp(struct intel_encoder *encoder)
2518{
828f5c6e
JN
2519 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
4be73780 2521 intel_edp_backlight_on(intel_dp);
b32c6f48 2522 intel_psr_enable(intel_dp);
d240f20f
JB
2523}
2524
ecff4f3b 2525static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2526{
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2528 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529
8ac33ed3
DV
2530 intel_dp_prepare(encoder);
2531
d41f1efb
DV
2532 /* Only ilk+ has port A */
2533 if (dport->port == PORT_A) {
2534 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2535 ironlake_edp_pll_on(intel_dp);
d41f1efb 2536 }
ab1f90f9
JN
2537}
2538
83b84597
VS
2539static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540{
2541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2542 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2543 enum pipe pipe = intel_dp->pps_pipe;
2544 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545
2546 edp_panel_vdd_off_sync(intel_dp);
2547
2548 /*
 2549 * VLV seems to get confused when multiple power sequencers
 2550 * have the same port selected (even if only one has power/vdd
 2551 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2552 * CHV on the other hand doesn't seem to mind having the same port
 2553 * selected in multiple power sequencers, but let's always clear the
 2554 * port select when logically disconnecting a power sequencer
2555 * from a port.
2556 */
2557 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2558 pipe_name(pipe), port_name(intel_dig_port->port));
2559 I915_WRITE(pp_on_reg, 0);
2560 POSTING_READ(pp_on_reg);
2561
2562 intel_dp->pps_pipe = INVALID_PIPE;
2563}
2564
a4a5d2f8
VS
2565static void vlv_steal_power_sequencer(struct drm_device *dev,
2566 enum pipe pipe)
2567{
2568 struct drm_i915_private *dev_priv = dev->dev_private;
2569 struct intel_encoder *encoder;
2570
2571 lockdep_assert_held(&dev_priv->pps_mutex);
2572
ac3c12e4
VS
2573 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2574 return;
2575
a4a5d2f8
VS
2576 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 base.head) {
2578 struct intel_dp *intel_dp;
773538e8 2579 enum port port;
a4a5d2f8
VS
2580
2581 if (encoder->type != INTEL_OUTPUT_EDP)
2582 continue;
2583
2584 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2585 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2586
2587 if (intel_dp->pps_pipe != pipe)
2588 continue;
2589
2590 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2591 pipe_name(pipe), port_name(port));
a4a5d2f8 2592
034e43c6
VS
2593 WARN(encoder->connectors_active,
2594 "stealing pipe %c power sequencer from active eDP port %c\n",
2595 pipe_name(pipe), port_name(port));
a4a5d2f8 2596
a4a5d2f8 2597 /* make sure vdd is off before we steal it */
83b84597 2598 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2599 }
2600}
2601
2602static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603{
2604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605 struct intel_encoder *encoder = &intel_dig_port->base;
2606 struct drm_device *dev = encoder->base.dev;
2607 struct drm_i915_private *dev_priv = dev->dev_private;
2608 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2609
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2611
093e3f13
VS
2612 if (!is_edp(intel_dp))
2613 return;
2614
a4a5d2f8
VS
2615 if (intel_dp->pps_pipe == crtc->pipe)
2616 return;
2617
2618 /*
2619 * If another power sequencer was being used on this
 2620 * port previously, make sure to turn off vdd there while
2621 * we still have control of it.
2622 */
2623 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2624 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2625
2626 /*
2627 * We may be stealing the power
2628 * sequencer from another port.
2629 */
2630 vlv_steal_power_sequencer(dev, crtc->pipe);
2631
2632 /* now it's all ours */
2633 intel_dp->pps_pipe = crtc->pipe;
2634
2635 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2636 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637
2638 /* init power sequencer on this pipe and port */
36b5f425
VS
2639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2641}
2642
ab1f90f9 2643static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2644{
2bd2ad64 2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2646 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2647 struct drm_device *dev = encoder->base.dev;
89b667f8 2648 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2649 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2650 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2651 int pipe = intel_crtc->pipe;
2652 u32 val;
a4fc5ed6 2653
ab1f90f9 2654 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2655
ab3c759a 2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2657 val = 0;
2658 if (pipe)
2659 val |= (1<<21);
2660 else
2661 val &= ~(1<<21);
2662 val |= 0x001000c4;
ab3c759a
CML
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2666
ab1f90f9
JN
2667 mutex_unlock(&dev_priv->dpio_lock);
2668
2669 intel_enable_dp(encoder);
89b667f8
JB
2670}
2671
ecff4f3b 2672static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2673{
2674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2675 struct drm_device *dev = encoder->base.dev;
2676 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2677 struct intel_crtc *intel_crtc =
2678 to_intel_crtc(encoder->base.crtc);
e4607fcf 2679 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2680 int pipe = intel_crtc->pipe;
89b667f8 2681
8ac33ed3
DV
2682 intel_dp_prepare(encoder);
2683
89b667f8 2684 /* Program Tx lane resets to default */
0980a60f 2685 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2687 DPIO_PCS_TX_LANE2_RESET |
2688 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2690 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2691 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2692 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2693 DPIO_PCS_CLK_SOFT_RESET);
2694
2695 /* Fix up inter-pair skew failure */
ab3c759a
CML
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2699 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2700}
2701
e4a1d846
CML
2702static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703{
2704 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2705 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 int pipe = intel_crtc->pipe;
2712 int data, i;
949c1d43 2713 u32 val;
e4a1d846 2714
e4a1d846 2715 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2716
570e2a74
VS
2717 /* allow hardware to manage TX FIFO reset source */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2719 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2720 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721
2722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2723 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725
949c1d43 2726 /* Deassert soft data lane reset*/
97fd4d5c 2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2728 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2729 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730
2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2732 val |= CHV_PCS_REQ_SOFTRESET_EN;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2736 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2738
97fd4d5c 2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2740 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2742
 2743 /* Program the optimal Tx lane latency setting */
e4a1d846 2744 for (i = 0; i < 4; i++) {
e4a1d846
CML
2745 /* Set the upar bit */
2746 data = (i == 1) ? 0x0 : 0x1;
2747 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2748 data << DPIO_UPAR_SHIFT);
2749 }
2750
2751 /* Data lane stagger programming */
2752 /* FIXME: Fix up value only after power analysis */
2753
2754 mutex_unlock(&dev_priv->dpio_lock);
2755
e4a1d846 2756 intel_enable_dp(encoder);
e4a1d846
CML
2757}
2758
9197c88b
VS
2759static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2760{
2761 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2762 struct drm_device *dev = encoder->base.dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_crtc *intel_crtc =
2765 to_intel_crtc(encoder->base.crtc);
2766 enum dpio_channel ch = vlv_dport_to_channel(dport);
2767 enum pipe pipe = intel_crtc->pipe;
2768 u32 val;
2769
625695f8
VS
2770 intel_dp_prepare(encoder);
2771
9197c88b
VS
2772 mutex_lock(&dev_priv->dpio_lock);
2773
b9e5ac3c
VS
2774 /* program left/right clock distribution */
2775 if (pipe != PIPE_B) {
2776 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2777 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2778 if (ch == DPIO_CH0)
2779 val |= CHV_BUFLEFTENA1_FORCE;
2780 if (ch == DPIO_CH1)
2781 val |= CHV_BUFRIGHTENA1_FORCE;
2782 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2783 } else {
2784 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2785 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2786 if (ch == DPIO_CH0)
2787 val |= CHV_BUFLEFTENA2_FORCE;
2788 if (ch == DPIO_CH1)
2789 val |= CHV_BUFRIGHTENA2_FORCE;
2790 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2791 }
2792
9197c88b
VS
2793 /* program clock channel usage */
2794 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2795 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2796 if (pipe != PIPE_B)
2797 val &= ~CHV_PCS_USEDCLKCHANNEL;
2798 else
2799 val |= CHV_PCS_USEDCLKCHANNEL;
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2801
2802 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2803 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2804 if (pipe != PIPE_B)
2805 val &= ~CHV_PCS_USEDCLKCHANNEL;
2806 else
2807 val |= CHV_PCS_USEDCLKCHANNEL;
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2809
2810 /*
 2811 * This is a bit weird since generally CL
2812 * matches the pipe, but here we need to
2813 * pick the CL based on the port.
2814 */
2815 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2816 if (pipe != PIPE_B)
2817 val &= ~CHV_CMN_USEDCLKCHANNEL;
2818 else
2819 val |= CHV_CMN_USEDCLKCHANNEL;
2820 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2821
2822 mutex_unlock(&dev_priv->dpio_lock);
2823}
2824
a4fc5ed6 2825/*
df0c237d
JB
2826 * Native read with retry for link status and receiver capability reads for
2827 * cases where the sink may still be asleep.
9d1a1031
JN
2828 *
2829 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2830 * supposed to retry 3 times per the spec.
a4fc5ed6 2831 */
9d1a1031
JN
2832static ssize_t
2833intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2834 void *buffer, size_t size)
a4fc5ed6 2835{
9d1a1031
JN
2836 ssize_t ret;
2837 int i;
61da5fab 2838
f6a19066
VS
2839 /*
 2840 * Sometimes we just get the same incorrect byte repeated
 2841 * over the entire buffer. Doing just one throw-away read
2842 * initially seems to "solve" it.
2843 */
2844 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2845
61da5fab 2846 for (i = 0; i < 3; i++) {
9d1a1031
JN
2847 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2848 if (ret == size)
2849 return ret;
61da5fab
JB
2850 msleep(1);
2851 }
a4fc5ed6 2852
9d1a1031 2853 return ret;
a4fc5ed6
KP
2854}
2855
2856/*
2857 * Fetch AUX CH registers 0x202 - 0x207 which contain
2858 * link status information
2859 */
2860static bool
93f62dad 2861intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2862{
9d1a1031
JN
2863 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2864 DP_LANE0_1_STATUS,
2865 link_status,
2866 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2867}
2868
1100244e 2869/* These are source-specific values. */
a4fc5ed6 2870static uint8_t
1a2eb460 2871intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2872{
30add22d 2873 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2874 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2875 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2876
7ad14a29
SJ
2877 if (INTEL_INFO(dev)->gen >= 9) {
2878 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2879 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2880 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2881 } else if (IS_VALLEYVIEW(dev))
bd60018a 2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2883 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2885 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2886 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2887 else
bd60018a 2888 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2889}
2890
2891static uint8_t
2892intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2893{
30add22d 2894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2895 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2896
5a9d1f1a
DL
2897 if (INTEL_INFO(dev)->gen >= 9) {
2898 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2907 default:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2909 }
2910 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2911 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2912 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2915 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2916 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2917 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2919 default:
bd60018a 2920 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2921 }
e2fa6fba
P
2922 } else if (IS_VALLEYVIEW(dev)) {
2923 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2924 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2925 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2927 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2929 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2931 default:
bd60018a 2932 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2933 }
bc7d38a4 2934 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2935 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2936 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2937 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2938 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2940 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2941 default:
bd60018a 2942 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2943 }
2944 } else {
2945 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2946 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2947 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2949 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2951 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2953 default:
bd60018a 2954 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2955 }
a4fc5ed6
KP
2956 }
2957}
2958
e2fa6fba
P
2959static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2960{
2961 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2962 struct drm_i915_private *dev_priv = dev->dev_private;
2963 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2964 struct intel_crtc *intel_crtc =
2965 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2966 unsigned long demph_reg_value, preemph_reg_value,
2967 uniqtranscale_reg_value;
2968 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2969 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2970 int pipe = intel_crtc->pipe;
e2fa6fba
P
2971
2972 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2973 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2974 preemph_reg_value = 0x0004000;
2975 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2977 demph_reg_value = 0x2B405555;
2978 uniqtranscale_reg_value = 0x552AB83A;
2979 break;
bd60018a 2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2981 demph_reg_value = 0x2B404040;
2982 uniqtranscale_reg_value = 0x5548B83A;
2983 break;
bd60018a 2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2985 demph_reg_value = 0x2B245555;
2986 uniqtranscale_reg_value = 0x5560B83A;
2987 break;
bd60018a 2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2989 demph_reg_value = 0x2B405555;
2990 uniqtranscale_reg_value = 0x5598DA3A;
2991 break;
2992 default:
2993 return 0;
2994 }
2995 break;
bd60018a 2996 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2997 preemph_reg_value = 0x0002000;
2998 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3000 demph_reg_value = 0x2B404040;
3001 uniqtranscale_reg_value = 0x5552B83A;
3002 break;
bd60018a 3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3004 demph_reg_value = 0x2B404848;
3005 uniqtranscale_reg_value = 0x5580B83A;
3006 break;
bd60018a 3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3008 demph_reg_value = 0x2B404040;
3009 uniqtranscale_reg_value = 0x55ADDA3A;
3010 break;
3011 default:
3012 return 0;
3013 }
3014 break;
bd60018a 3015 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3016 preemph_reg_value = 0x0000000;
3017 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3019 demph_reg_value = 0x2B305555;
3020 uniqtranscale_reg_value = 0x5570B83A;
3021 break;
bd60018a 3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3023 demph_reg_value = 0x2B2B4040;
3024 uniqtranscale_reg_value = 0x55ADDA3A;
3025 break;
3026 default:
3027 return 0;
3028 }
3029 break;
bd60018a 3030 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3031 preemph_reg_value = 0x0006000;
3032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3034 demph_reg_value = 0x1B405555;
3035 uniqtranscale_reg_value = 0x55ADDA3A;
3036 break;
3037 default:
3038 return 0;
3039 }
3040 break;
3041 default:
3042 return 0;
3043 }
3044
0980a60f 3045 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3046 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3047 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3048 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3049 uniqtranscale_reg_value);
ab3c759a
CML
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3052 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3054 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3055
3056 return 0;
3057}
3058
e4a1d846
CML
3059static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3060{
3061 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3062 struct drm_i915_private *dev_priv = dev->dev_private;
3063 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3064 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3065 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3066 uint8_t train_set = intel_dp->train_set[0];
3067 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3068 enum pipe pipe = intel_crtc->pipe;
3069 int i;
e4a1d846
CML
3070
3071 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3072 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3075 deemph_reg_value = 128;
3076 margin_reg_value = 52;
3077 break;
bd60018a 3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3079 deemph_reg_value = 128;
3080 margin_reg_value = 77;
3081 break;
bd60018a 3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3083 deemph_reg_value = 128;
3084 margin_reg_value = 102;
3085 break;
bd60018a 3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3087 deemph_reg_value = 128;
3088 margin_reg_value = 154;
3089 /* FIXME extra to set for 1200 */
3090 break;
3091 default:
3092 return 0;
3093 }
3094 break;
bd60018a 3095 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3096 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3098 deemph_reg_value = 85;
3099 margin_reg_value = 78;
3100 break;
bd60018a 3101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3102 deemph_reg_value = 85;
3103 margin_reg_value = 116;
3104 break;
bd60018a 3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3106 deemph_reg_value = 85;
3107 margin_reg_value = 154;
3108 break;
3109 default:
3110 return 0;
3111 }
3112 break;
bd60018a 3113 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3114 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3116 deemph_reg_value = 64;
3117 margin_reg_value = 104;
3118 break;
bd60018a 3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3120 deemph_reg_value = 64;
3121 margin_reg_value = 154;
3122 break;
3123 default:
3124 return 0;
3125 }
3126 break;
bd60018a 3127 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3130 deemph_reg_value = 43;
3131 margin_reg_value = 154;
3132 break;
3133 default:
3134 return 0;
3135 }
3136 break;
3137 default:
3138 return 0;
3139 }
3140
3141 mutex_lock(&dev_priv->dpio_lock);
3142
3143 /* Clear calc init */
1966e59e
VS
3144 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3145 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3146 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3147 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3148 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3149
3150 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3151 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3152 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3153 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3154 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3155
a02ef3c7
VS
3156 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3157 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3158 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3160
3161 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3162 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3165
e4a1d846 3166 /* Program swing deemph */
f72df8db
VS
3167 for (i = 0; i < 4; i++) {
3168 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3169 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3170 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3171 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3172 }
e4a1d846
CML
3173
3174 /* Program swing margin */
f72df8db
VS
3175 for (i = 0; i < 4; i++) {
3176 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3177 val &= ~DPIO_SWING_MARGIN000_MASK;
3178 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3179 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3180 }
e4a1d846
CML
3181
3182 /* Disable unique transition scale */
f72df8db
VS
3183 for (i = 0; i < 4; i++) {
3184 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3185 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3186 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3187 }
e4a1d846
CML
3188
3189 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3190 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3191 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3192 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3193
3194 /*
3195 * The document said it needs to set bit 27 for ch0 and bit 26
3196 * for ch1. Might be a typo in the doc.
3197 * For now, for this unique transition scale selection, set bit
3198 * 27 for ch0 and ch1.
3199 */
f72df8db
VS
3200 for (i = 0; i < 4; i++) {
3201 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3202 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3203 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3204 }
e4a1d846 3205
f72df8db
VS
3206 for (i = 0; i < 4; i++) {
3207 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3208 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3210 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3211 }
e4a1d846
CML
3212 }
3213
3214 /* Start swing calculation */
1966e59e
VS
3215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3216 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3220 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3222
3223 /* LRC Bypass */
3224 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3225 val |= DPIO_LRC_BYPASS;
3226 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3227
3228 mutex_unlock(&dev_priv->dpio_lock);
3229
3230 return 0;
3231}
3232
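/*
 * Scan the sink's per-lane adjustment requests in link_status, keep the
 * highest voltage swing and pre-emphasis seen, clamp them to the source's
 * maximums (flagging MAX_SWING/MAX_PRE_EMPHASIS reached when clamping),
 * and store the result in every entry of intel_dp->train_set.
 */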
a4fc5ed6 3233static void
0301b3ac
JN
3234intel_get_adjust_train(struct intel_dp *intel_dp,
3235 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3236{
3237 uint8_t v = 0;
3238 uint8_t p = 0;
3239 int lane;
1a2eb460
KP
3240 uint8_t voltage_max;
3241 uint8_t preemph_max;
a4fc5ed6 3242
33a34e4e 3243 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3244 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3245 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3246
3247 if (this_v > v)
3248 v = this_v;
3249 if (this_p > p)
3250 p = this_p;
3251 }
3252
1a2eb460 3253 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3254 if (v >= voltage_max)
3255 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3256
1a2eb460
KP
3257 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3258 if (p >= preemph_max)
3259 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3260
3261 for (lane = 0; lane < 4; lane++)
33a34e4e 3262 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3263}
3264
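/* Gen4's DP voltage swing and pre-emphasis control */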
3265static uint32_t
f0a3424e 3266intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3267{
3cf2efb1 3268 uint32_t signal_levels = 0;
a4fc5ed6 3269
3cf2efb1 3270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3272 default:
3273 signal_levels |= DP_VOLTAGE_0_4;
3274 break;
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3276 signal_levels |= DP_VOLTAGE_0_6;
3277 break;
bd60018a 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3279 signal_levels |= DP_VOLTAGE_0_8;
3280 break;
bd60018a 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3282 signal_levels |= DP_VOLTAGE_1_2;
3283 break;
3284 }
3cf2efb1 3285 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3286 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3287 default:
3288 signal_levels |= DP_PRE_EMPHASIS_0;
3289 break;
bd60018a 3290 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3291 signal_levels |= DP_PRE_EMPHASIS_3_5;
3292 break;
bd60018a 3293 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3294 signal_levels |= DP_PRE_EMPHASIS_6;
3295 break;
bd60018a 3296 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3297 signal_levels |= DP_PRE_EMPHASIS_9_5;
3298 break;
3299 }
3300 return signal_levels;
3301}
3302
e3421a18
ZW
3303/* Gen6's DP voltage swing and pre-emphasis control */
3304static uint32_t
3305intel_gen6_edp_signal_levels(uint8_t train_set)
3306{
3c5a62b5
YL
3307 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3308 DP_TRAIN_PRE_EMPHASIS_MASK);
3309 switch (signal_levels) {
bd60018a
SJ
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3312 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3314 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3317 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3320 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3323 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3324 default:
3c5a62b5
YL
3325 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3326 "0x%x\n", signal_levels);
3327 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3328 }
3329}
3330
1a2eb460
KP
3331/* Gen7's DP voltage swing and pre-emphasis control */
3332static uint32_t
3333intel_gen7_edp_signal_levels(uint8_t train_set)
3334{
3335 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3336 DP_TRAIN_PRE_EMPHASIS_MASK);
3337 switch (signal_levels) {
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3339 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3341 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3343 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3344
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3346 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3348 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3349
bd60018a 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3351 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3353 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3354
3355 default:
3356 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3357 "0x%x\n", signal_levels);
3358 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3359 }
3360}
3361
d6c0d722
PZ
3362/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3363static uint32_t
f0a3424e 3364intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3365{
d6c0d722
PZ
3366 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3367 DP_TRAIN_PRE_EMPHASIS_MASK);
3368 switch (signal_levels) {
bd60018a 3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3370 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3372 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3374 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3376 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3377
bd60018a 3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3379 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3381 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3383 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3384
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3386 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3388 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3389
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3392 default:
3393 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3394 "0x%x\n", signal_levels);
c5fe6a06 3395 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3396 }
a4fc5ed6
KP
3397}
3398
f0a3424e
PZ
3399/* Properly updates "DP" with the correct signal levels. */
3400static void
3401intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3402{
3403 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3404 enum port port = intel_dig_port->port;
f0a3424e
PZ
3405 struct drm_device *dev = intel_dig_port->base.base.dev;
3406 uint32_t signal_levels, mask;
3407 uint8_t train_set = intel_dp->train_set[0];
3408
5a9d1f1a 3409 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3410 signal_levels = intel_hsw_signal_levels(train_set);
3411 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3412 } else if (IS_CHERRYVIEW(dev)) {
3413 signal_levels = intel_chv_signal_levels(intel_dp);
3414 mask = 0;
e2fa6fba
P
3415 } else if (IS_VALLEYVIEW(dev)) {
3416 signal_levels = intel_vlv_signal_levels(intel_dp);
3417 mask = 0;
bc7d38a4 3418 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3419 signal_levels = intel_gen7_edp_signal_levels(train_set);
3420 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3421 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3422 signal_levels = intel_gen6_edp_signal_levels(train_set);
3423 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3424 } else {
3425 signal_levels = intel_gen4_signal_levels(train_set);
3426 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3427 }
3428
3429 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3430
3431 *DP = (*DP & ~mask) | signal_levels;
3432}
3433
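/*
 * Program the port register with the requested training pattern and drive
 * settings, then mirror them to the sink over AUX: DP_TRAINING_PATTERN_SET
 * followed by DP_TRAINING_LANEx_SET (the lane bytes are skipped when
 * training is being disabled). Returns true if the DPCD write completed.
 */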
a4fc5ed6 3434static bool
ea5b213a 3435intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3436 uint32_t *DP,
58e10eb9 3437 uint8_t dp_train_pat)
a4fc5ed6 3438{
174edf1f
PZ
3439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3441 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3442 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3443 int ret, len;
a4fc5ed6 3444
7b13b58a 3445 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3446
70aff66c 3447 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3448 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3449
2cdfe6c8
JN
3450 buf[0] = dp_train_pat;
3451 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3452 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3453 /* don't write DP_TRAINING_LANEx_SET on disable */
3454 len = 1;
3455 } else {
3456 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3457 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3458 len = intel_dp->lane_count + 1;
47ea7542 3459 }
a4fc5ed6 3460
9d1a1031
JN
3461 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3462 buf, len);
2cdfe6c8
JN
3463
3464 return ret == len;
a4fc5ed6
KP
3465}
3466
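/*
 * Restart training from scratch: clear the cached drive settings,
 * recompute the signal levels and program the requested training pattern.
 */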
70aff66c
JN
3467static bool
3468intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3469 uint8_t dp_train_pat)
3470{
953d22e8 3471 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3472 intel_dp_set_signal_levels(intel_dp, DP);
3473 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3474}
3475
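/*
 * Mid-training adjustment: apply the swing/pre-emphasis values requested
 * by the sink and push the new per-lane settings to DP_TRAINING_LANE0_SET.
 */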
3476static bool
3477intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3478 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3479{
3480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3481 struct drm_device *dev = intel_dig_port->base.base.dev;
3482 struct drm_i915_private *dev_priv = dev->dev_private;
3483 int ret;
3484
3485 intel_get_adjust_train(intel_dp, link_status);
3486 intel_dp_set_signal_levels(intel_dp, DP);
3487
3488 I915_WRITE(intel_dp->output_reg, *DP);
3489 POSTING_READ(intel_dp->output_reg);
3490
9d1a1031
JN
3491 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3492 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3493
3494 return ret == intel_dp->lane_count;
3495}
3496
3ab9c637
ID
3497static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3498{
3499 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3500 struct drm_device *dev = intel_dig_port->base.base.dev;
3501 struct drm_i915_private *dev_priv = dev->dev_private;
3502 enum port port = intel_dig_port->port;
3503 uint32_t val;
3504
3505 if (!HAS_DDI(dev))
3506 return;
3507
3508 val = I915_READ(DP_TP_CTL(port));
3509 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3510 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3511 I915_WRITE(DP_TP_CTL(port), val);
3512
3513 /*
3514 * On PORT_A we can have only eDP in SST mode. There the only reason
3515 * we need to set idle transmission mode is to work around a HW issue
3516 * where we enable the pipe while not in idle link-training mode.
3517 * In this case there is a requirement to wait for a minimum number of
3518 * idle patterns to be sent.
3519 */
3520 if (port == PORT_A)
3521 return;
3522
3523 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3524 1))
3525 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3526}
3527
33a34e4e 3528/* Enable corresponding port and start training pattern 1 */
c19b0669 3529void
33a34e4e 3530intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3531{
da63a9f2 3532 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3533 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3534 int i;
3535 uint8_t voltage;
cdb0e95b 3536 int voltage_tries, loop_tries;
ea5b213a 3537 uint32_t DP = intel_dp->DP;
6aba5b6c 3538 uint8_t link_config[2];
a4fc5ed6 3539
affa9354 3540 if (HAS_DDI(dev))
c19b0669
PZ
3541 intel_ddi_prepare_link_retrain(encoder);
3542
3cf2efb1 3543 /* Write the link configuration data */
6aba5b6c
JN
3544 link_config[0] = intel_dp->link_bw;
3545 link_config[1] = intel_dp->lane_count;
3546 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3547 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3548 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3549 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3550 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3551 &intel_dp->rate_select, 1);
6aba5b6c
JN
3552
3553 link_config[0] = 0;
3554 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3555 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3556
3557 DP |= DP_PORT_EN;
1a2eb460 3558
70aff66c
JN
3559 /* clock recovery */
3560 if (!intel_dp_reset_link_train(intel_dp, &DP,
3561 DP_TRAINING_PATTERN_1 |
3562 DP_LINK_SCRAMBLING_DISABLE)) {
3563 DRM_ERROR("failed to enable link training\n");
3564 return;
3565 }
3566
a4fc5ed6 3567 voltage = 0xff;
cdb0e95b
KP
3568 voltage_tries = 0;
3569 loop_tries = 0;
a4fc5ed6 3570 for (;;) {
70aff66c 3571 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3572
a7c9655f 3573 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3574 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3575 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3576 break;
93f62dad 3577 }
a4fc5ed6 3578
01916270 3579 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3580 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3581 break;
3582 }
3583
3584 /* Check to see if we've tried the max voltage */
3585 for (i = 0; i < intel_dp->lane_count; i++)
3586 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3587 break;
3b4f819d 3588 if (i == intel_dp->lane_count) {
b06fbda3
DV
3589 ++loop_tries;
3590 if (loop_tries == 5) {
3def84b3 3591 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3592 break;
3593 }
70aff66c
JN
3594 intel_dp_reset_link_train(intel_dp, &DP,
3595 DP_TRAINING_PATTERN_1 |
3596 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3597 voltage_tries = 0;
3598 continue;
3599 }
a4fc5ed6 3600
3cf2efb1 3601 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3602 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3603 ++voltage_tries;
b06fbda3 3604 if (voltage_tries == 5) {
3def84b3 3605 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3606 break;
3607 }
3608 } else
3609 voltage_tries = 0;
3610 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3611
70aff66c
JN
3612 /* Update training set as requested by target */
3613 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3614 DRM_ERROR("failed to update link training\n");
3615 break;
3616 }
a4fc5ed6
KP
3617 }
3618
33a34e4e
JB
3619 intel_dp->DP = DP;
3620}
3621
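/*
 * Channel equalization phase: switch to training pattern 2 (or pattern 3
 * for HBR2/TPS3 capable sinks) and keep adjusting the drive settings until
 * the sink reports channel EQ done, falling back to clock recovery if it
 * is lost along the way.
 */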
c19b0669 3622void
33a34e4e
JB
3623intel_dp_complete_link_train(struct intel_dp *intel_dp)
3624{
33a34e4e 3625 bool channel_eq = false;
37f80975 3626 int tries, cr_tries;
33a34e4e 3627 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3628 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3629
3630 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3631 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3632 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3633
a4fc5ed6 3634 /* channel equalization */
70aff66c 3635 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3636 training_pattern |
70aff66c
JN
3637 DP_LINK_SCRAMBLING_DISABLE)) {
3638 DRM_ERROR("failed to start channel equalization\n");
3639 return;
3640 }
3641
a4fc5ed6 3642 tries = 0;
37f80975 3643 cr_tries = 0;
a4fc5ed6
KP
3644 channel_eq = false;
3645 for (;;) {
70aff66c 3646 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3647
37f80975
JB
3648 if (cr_tries > 5) {
3649 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3650 break;
3651 }
3652
a7c9655f 3653 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3654 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3655 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3656 break;
70aff66c 3657 }
a4fc5ed6 3658
37f80975 3659 /* Make sure clock is still ok */
01916270 3660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3661 intel_dp_start_link_train(intel_dp);
70aff66c 3662 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3663 training_pattern |
70aff66c 3664 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3665 cr_tries++;
3666 continue;
3667 }
3668
1ffdff13 3669 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3670 channel_eq = true;
3671 break;
3672 }
a4fc5ed6 3673
37f80975
JB
3674 /* Try 5 times, then try clock recovery if that fails */
3675 if (tries > 5) {
37f80975 3676 intel_dp_start_link_train(intel_dp);
70aff66c 3677 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3678 training_pattern |
70aff66c 3679 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3680 tries = 0;
3681 cr_tries++;
3682 continue;
3683 }
a4fc5ed6 3684
70aff66c
JN
3685 /* Update training set as requested by target */
3686 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3687 DRM_ERROR("failed to update link training\n");
3688 break;
3689 }
3cf2efb1 3690 ++tries;
869184a6 3691 }
3cf2efb1 3692
3ab9c637
ID
3693 intel_dp_set_idle_link_train(intel_dp);
3694
3695 intel_dp->DP = DP;
3696
d6c0d722 3697 if (channel_eq)
07f42258 3698 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3699
3ab9c637
ID
3700}
3701
3702void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3703{
70aff66c 3704 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3705 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3706}
3707
3708static void
ea5b213a 3709intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3710{
da63a9f2 3711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3712 enum port port = intel_dig_port->port;
da63a9f2 3713 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3714 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3715 uint32_t DP = intel_dp->DP;
a4fc5ed6 3716
bc76e320 3717 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3718 return;
3719
0c33d8d7 3720 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3721 return;
3722
28c97730 3723 DRM_DEBUG_KMS("\n");
32f9d658 3724
bc7d38a4 3725 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3726 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3727 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3728 } else {
aad3d14d
VS
3729 if (IS_CHERRYVIEW(dev))
3730 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3731 else
3732 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3733 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3734 }
fe255d00 3735 POSTING_READ(intel_dp->output_reg);
5eb08b69 3736
493a7081 3737 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3738 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3739 /* Hardware workaround: leaving our transcoder select
3740 * set to transcoder B while it's off will prevent the
3741 * corresponding HDMI output on transcoder A.
3742 *
3743 * Combine this with another hardware workaround:
3744 * transcoder select bit can only be cleared while the
3745 * port is enabled.
3746 */
3747 DP &= ~DP_PIPEB_SELECT;
3748 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3749 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3750 }
3751
832afda6 3752 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3753 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3754 POSTING_READ(intel_dp->output_reg);
f01eca2e 3755 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3756}
3757
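/*
 * Read and cache the sink's DPCD receiver capabilities. Also probes PSR
 * support on eDP, TPS3 support, the eDP 1.4 supported link rate table and,
 * if a branch device is present, its downstream port capabilities.
 */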
26d61aad
KP
3758static bool
3759intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3760{
a031d709
RV
3761 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3762 struct drm_device *dev = dig_port->base.base.dev;
3763 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3764 uint8_t rev;
a031d709 3765
9d1a1031
JN
3766 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3767 sizeof(intel_dp->dpcd)) < 0)
edb39244 3768 return false; /* aux transfer failed */
92fd8fd1 3769
a8e98153 3770 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3771
edb39244
AJ
3772 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3773 return false; /* DPCD not present */
3774
2293bb5c
SK
3775 /* Check if the panel supports PSR */
3776 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3777 if (is_edp(intel_dp)) {
9d1a1031
JN
3778 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3779 intel_dp->psr_dpcd,
3780 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3781 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3782 dev_priv->psr.sink_support = true;
50003939 3783 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3784 }
50003939
JN
3785 }
3786
7809a611 3787 /* Training Pattern 3 support, both source and sink */
06ea66b6 3788 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3789 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3790 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3791 intel_dp->use_tps3 = true;
f8d8a672 3792 DRM_DEBUG_KMS("DisplayPort TPS3 supported\n");
06ea66b6
TP
3793 } else
3794 intel_dp->use_tps3 = false;
3795
fc0f8e25
SJ
3796 /* Intermediate frequency support */
3797 if (is_edp(intel_dp) &&
3798 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3799 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3800 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3801 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3802 int i;
3803
fc0f8e25
SJ
3804 intel_dp_dpcd_read_wake(&intel_dp->aux,
3805 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3806 sink_rates,
3807 sizeof(sink_rates));
ea2d8a42 3808
94ca719e
VS
3809 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3810 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3811
3812 if (val == 0)
3813 break;
3814
af77b974
SJ
3815 /* Value read is in kHz while drm clock is saved in deca-kHz */
3816 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3817 }
94ca719e 3818 intel_dp->num_sink_rates = i;
fc0f8e25 3819 }
0336400e
VS
3820
3821 intel_dp_print_rates(intel_dp);
3822
edb39244
AJ
3823 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3824 DP_DWN_STRM_PORT_PRESENT))
3825 return true; /* native DP sink */
3826
3827 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3828 return true; /* no per-port downstream info */
3829
9d1a1031
JN
3830 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3831 intel_dp->downstream_ports,
3832 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3833 return false; /* downstream port status fetch failed */
3834
3835 return true;
92fd8fd1
KP
3836}
3837
0d198328
AJ
3838static void
3839intel_dp_probe_oui(struct intel_dp *intel_dp)
3840{
3841 u8 buf[3];
3842
3843 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3844 return;
3845
9d1a1031 3846 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3847 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3848 buf[0], buf[1], buf[2]);
3849
9d1a1031 3850 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3851 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3852 buf[0], buf[1], buf[2]);
3853}
3854
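/*
 * Check DP_MSTM_CAP on DPCD 1.2+ sinks and enable or disable the MST
 * topology manager accordingly. Returns whether the sink will be driven
 * in MST mode.
 */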
0e32b39c
DA
3855static bool
3856intel_dp_probe_mst(struct intel_dp *intel_dp)
3857{
3858 u8 buf[1];
3859
3860 if (!intel_dp->can_mst)
3861 return false;
3862
3863 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3864 return false;
3865
0e32b39c
DA
3866 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3867 if (buf[0] & DP_MST_CAP) {
3868 DRM_DEBUG_KMS("Sink is MST capable\n");
3869 intel_dp->is_mst = true;
3870 } else {
3871 DRM_DEBUG_KMS("Sink is not MST capable\n");
3872 intel_dp->is_mst = false;
3873 }
3874 }
0e32b39c
DA
3875
3876 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3877 return intel_dp->is_mst;
3878}
3879
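/*
 * Ask the sink to compute a test CRC of the transmitted frame: start the
 * sink test via DP_TEST_SINK, wait up to six vblanks for the CRC count in
 * DP_TEST_SINK_MISC to advance, then read the six CRC bytes starting at
 * DP_TEST_CRC_R_CR and stop the test again.
 */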
d2e216d0
RV
3880int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3881{
3882 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3883 struct drm_device *dev = intel_dig_port->base.base.dev;
3884 struct intel_crtc *intel_crtc =
3885 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3886 u8 buf;
3887 int test_crc_count;
3888 int attempts = 6;
d2e216d0 3889
ad9dc91b 3890 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3891 return -EIO;
d2e216d0 3892
ad9dc91b 3893 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3894 return -ENOTTY;
3895
1dda5f93
RV
3896 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3897 return -EIO;
3898
9d1a1031 3899 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3900 buf | DP_TEST_SINK_START) < 0)
bda0381e 3901 return -EIO;
d2e216d0 3902
1dda5f93 3903 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3904 return -EIO;
ad9dc91b 3905 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3906
ad9dc91b 3907 do {
1dda5f93
RV
3908 if (drm_dp_dpcd_readb(&intel_dp->aux,
3909 DP_TEST_SINK_MISC, &buf) < 0)
3910 return -EIO;
ad9dc91b
RV
3911 intel_wait_for_vblank(dev, intel_crtc->pipe);
3912 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3913
3914 if (attempts == 0) {
90bd1f46
DV
3915 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3916 return -ETIMEDOUT;
ad9dc91b 3917 }
d2e216d0 3918
9d1a1031 3919 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3920 return -EIO;
d2e216d0 3921
1dda5f93
RV
3922 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3923 return -EIO;
3924 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3925 buf & ~DP_TEST_SINK_START) < 0)
3926 return -EIO;
ce31d9f4 3927
d2e216d0
RV
3928 return 0;
3929}
3930
a60f0e38
JB
3931static bool
3932intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3933{
9d1a1031
JN
3934 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3935 DP_DEVICE_SERVICE_IRQ_VECTOR,
3936 sink_irq_vector, 1) == 1;
a60f0e38
JB
3937}
3938
0e32b39c
DA
3939static bool
3940intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3941{
3942 int ret;
3943
3944 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3945 DP_SINK_COUNT_ESI,
3946 sink_irq_vector, 14);
3947 if (ret != 14)
3948 return false;
3949
3950 return true;
3951}
3952
a60f0e38
JB
3953static void
3954intel_dp_handle_test_request(struct intel_dp *intel_dp)
3955{
3956 /* NAK by default */
9d1a1031 3957 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3958}
3959
0e32b39c
DA
3960static int
3961intel_dp_check_mst_status(struct intel_dp *intel_dp)
3962{
3963 bool bret;
3964
3965 if (intel_dp->is_mst) {
3966 u8 esi[16] = { 0 };
3967 int ret = 0;
3968 int retry;
3969 bool handled;
3970 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3971go_again:
3972 if (bret == true) {
3973
3974 /* check link status - esi[10] = 0x200c */
3975 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3976 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3977 intel_dp_start_link_train(intel_dp);
3978 intel_dp_complete_link_train(intel_dp);
3979 intel_dp_stop_link_train(intel_dp);
3980 }
3981
6f34cc39 3982 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3983 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3984
3985 if (handled) {
3986 for (retry = 0; retry < 3; retry++) {
3987 int wret;
3988 wret = drm_dp_dpcd_write(&intel_dp->aux,
3989 DP_SINK_COUNT_ESI+1,
3990 &esi[1], 3);
3991 if (wret == 3) {
3992 break;
3993 }
3994 }
3995
3996 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3997 if (bret == true) {
6f34cc39 3998 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3999 goto go_again;
4000 }
4001 } else
4002 ret = 0;
4003
4004 return ret;
4005 } else {
4006 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4007 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4008 intel_dp->is_mst = false;
4009 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4010 /* send a hotplug event */
4011 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4012 }
4013 }
4014 return -EINVAL;
4015}
4016
a4fc5ed6
KP
4017/*
4018 * According to DP spec
4019 * 5.1.2:
4020 * 1. Read DPCD
4021 * 2. Configure link according to Receiver Capabilities
4022 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4023 * 4. Check link status on receipt of hot-plug interrupt
4024 */
a5146200 4025static void
ea5b213a 4026intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4027{
5b215bcf 4028 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4029 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4030 u8 sink_irq_vector;
93f62dad 4031 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4032
5b215bcf
DA
4033 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4034
da63a9f2 4035 if (!intel_encoder->connectors_active)
d2b996ac 4036 return;
59cd09e1 4037
da63a9f2 4038 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4039 return;
4040
1a125d8a
ID
4041 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4042 return;
4043
92fd8fd1 4044 /* Try to read receiver status if the link appears to be up */
93f62dad 4045 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4046 return;
4047 }
4048
92fd8fd1 4049 /* Now read the DPCD to see if it's actually running */
26d61aad 4050 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4051 return;
4052 }
4053
a60f0e38
JB
4054 /* Try to read the source of the interrupt */
4055 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4056 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4057 /* Clear interrupt source */
9d1a1031
JN
4058 drm_dp_dpcd_writeb(&intel_dp->aux,
4059 DP_DEVICE_SERVICE_IRQ_VECTOR,
4060 sink_irq_vector);
a60f0e38
JB
4061
4062 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4063 intel_dp_handle_test_request(intel_dp);
4064 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4065 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4066 }
4067
1ffdff13 4068 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4069 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4070 intel_encoder->base.name);
33a34e4e
JB
4071 intel_dp_start_link_train(intel_dp);
4072 intel_dp_complete_link_train(intel_dp);
3ab9c637 4073 intel_dp_stop_link_train(intel_dp);
33a34e4e 4074 }
a4fc5ed6 4075}
a4fc5ed6 4076
caf9ab24 4077/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4078static enum drm_connector_status
26d61aad 4079intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4080{
caf9ab24 4081 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4082 uint8_t type;
4083
4084 if (!intel_dp_get_dpcd(intel_dp))
4085 return connector_status_disconnected;
4086
4087 /* if there's no downstream port, we're done */
4088 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4089 return connector_status_connected;
caf9ab24
AJ
4090
4091 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4092 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4093 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4094 uint8_t reg;
9d1a1031
JN
4095
4096 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4097 &reg, 1) < 0)
caf9ab24 4098 return connector_status_unknown;
9d1a1031 4099
23235177
AJ
4100 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4101 : connector_status_disconnected;
caf9ab24
AJ
4102 }
4103
4104 /* If no HPD, poke DDC gently */
0b99836f 4105 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4106 return connector_status_connected;
caf9ab24
AJ
4107
4108 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4109 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4110 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4111 if (type == DP_DS_PORT_TYPE_VGA ||
4112 type == DP_DS_PORT_TYPE_NON_EDID)
4113 return connector_status_unknown;
4114 } else {
4115 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4116 DP_DWN_STRM_PORT_TYPE_MASK;
4117 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4118 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4119 return connector_status_unknown;
4120 }
caf9ab24
AJ
4121
4122 /* Anything else is out of spec, warn and ignore */
4123 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4124 return connector_status_disconnected;
71ba9000
AJ
4125}
4126
d410b56d
CW
4127static enum drm_connector_status
4128edp_detect(struct intel_dp *intel_dp)
4129{
4130 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4131 enum drm_connector_status status;
4132
4133 status = intel_panel_detect(dev);
4134 if (status == connector_status_unknown)
4135 status = connector_status_connected;
4136
4137 return status;
4138}
4139
5eb08b69 4140static enum drm_connector_status
a9756bb5 4141ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4142{
30add22d 4143 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4144 struct drm_i915_private *dev_priv = dev->dev_private;
4145 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4146
1b469639
DL
4147 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4148 return connector_status_disconnected;
4149
26d61aad 4150 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4151}
4152
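/*
 * Check the live status bit for the given port in PORT_HOTPLUG_STAT.
 * Returns 1 if a sink is detected, 0 if not, and -EINVAL for ports we
 * don't know how to probe.
 */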
2a592bec
DA
4153static int g4x_digital_port_connected(struct drm_device *dev,
4154 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4155{
a4fc5ed6 4156 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4157 uint32_t bit;
5eb08b69 4158
232a6ee9
TP
4159 if (IS_VALLEYVIEW(dev)) {
4160 switch (intel_dig_port->port) {
4161 case PORT_B:
4162 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4163 break;
4164 case PORT_C:
4165 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4166 break;
4167 case PORT_D:
4168 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4169 break;
4170 default:
2a592bec 4171 return -EINVAL;
232a6ee9
TP
4172 }
4173 } else {
4174 switch (intel_dig_port->port) {
4175 case PORT_B:
4176 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4177 break;
4178 case PORT_C:
4179 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4180 break;
4181 case PORT_D:
4182 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4183 break;
4184 default:
2a592bec 4185 return -EINVAL;
232a6ee9 4186 }
a4fc5ed6
KP
4187 }
4188
10f76a38 4189 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4190 return 0;
4191 return 1;
4192}
4193
4194static enum drm_connector_status
4195g4x_dp_detect(struct intel_dp *intel_dp)
4196{
4197 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4198 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4199 int ret;
4200
4201 /* Can't disconnect eDP, but you can close the lid... */
4202 if (is_edp(intel_dp)) {
4203 enum drm_connector_status status;
4204
4205 status = intel_panel_detect(dev);
4206 if (status == connector_status_unknown)
4207 status = connector_status_connected;
4208 return status;
4209 }
4210
4211 ret = g4x_digital_port_connected(dev, intel_dig_port);
4212 if (ret == -EINVAL)
4213 return connector_status_unknown;
4214 else if (ret == 0)
a4fc5ed6
KP
4215 return connector_status_disconnected;
4216
26d61aad 4217 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4218}
4219
8c241fef 4220static struct edid *
beb60608 4221intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4222{
beb60608 4223 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4224
9cd300e0
JN
4225 /* use cached edid if we have one */
4226 if (intel_connector->edid) {
9cd300e0
JN
4227 /* invalid edid */
4228 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4229 return NULL;
4230
55e9edeb 4231 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4232 } else
4233 return drm_get_edid(&intel_connector->base,
4234 &intel_dp->aux.ddc);
4235}
8c241fef 4236
beb60608
CW
4237static void
4238intel_dp_set_edid(struct intel_dp *intel_dp)
4239{
4240 struct intel_connector *intel_connector = intel_dp->attached_connector;
4241 struct edid *edid;
8c241fef 4242
beb60608
CW
4243 edid = intel_dp_get_edid(intel_dp);
4244 intel_connector->detect_edid = edid;
4245
4246 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4247 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4248 else
4249 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4250}
4251
beb60608
CW
4252static void
4253intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4254{
beb60608 4255 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4256
beb60608
CW
4257 kfree(intel_connector->detect_edid);
4258 intel_connector->detect_edid = NULL;
9cd300e0 4259
beb60608
CW
4260 intel_dp->has_audio = false;
4261}
d6f24d0f 4262
beb60608
CW
4263static enum intel_display_power_domain
4264intel_dp_power_get(struct intel_dp *dp)
4265{
4266 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4267 enum intel_display_power_domain power_domain;
4268
4269 power_domain = intel_display_port_power_domain(encoder);
4270 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4271
4272 return power_domain;
4273}
d6f24d0f 4274
beb60608
CW
4275static void
4276intel_dp_power_put(struct intel_dp *dp,
4277 enum intel_display_power_domain power_domain)
4278{
4279 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4280 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4281}
4282
a9756bb5
ZW
4283static enum drm_connector_status
4284intel_dp_detect(struct drm_connector *connector, bool force)
4285{
4286 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4287 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4288 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4289 struct drm_device *dev = connector->dev;
a9756bb5 4290 enum drm_connector_status status;
671dedd2 4291 enum intel_display_power_domain power_domain;
0e32b39c 4292 bool ret;
a9756bb5 4293
164c8598 4294 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4295 connector->base.id, connector->name);
beb60608 4296 intel_dp_unset_edid(intel_dp);
164c8598 4297
0e32b39c
DA
4298 if (intel_dp->is_mst) {
4299 /* MST devices are disconnected from a monitor POV */
4300 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4301 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4302 return connector_status_disconnected;
0e32b39c
DA
4303 }
4304
beb60608 4305 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4306
d410b56d
CW
4307 /* Can't disconnect eDP, but you can close the lid... */
4308 if (is_edp(intel_dp))
4309 status = edp_detect(intel_dp);
4310 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4311 status = ironlake_dp_detect(intel_dp);
4312 else
4313 status = g4x_dp_detect(intel_dp);
4314 if (status != connector_status_connected)
c8c8fb33 4315 goto out;
a9756bb5 4316
0d198328
AJ
4317 intel_dp_probe_oui(intel_dp);
4318
0e32b39c
DA
4319 ret = intel_dp_probe_mst(intel_dp);
4320 if (ret) {
4321 /* if we are in MST mode then this connector
4322 won't appear connected or have anything with EDID on it */
4323 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4324 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4325 status = connector_status_disconnected;
4326 goto out;
4327 }
4328
beb60608 4329 intel_dp_set_edid(intel_dp);
a9756bb5 4330
d63885da
PZ
4331 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4332 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4333 status = connector_status_connected;
4334
4335out:
beb60608 4336 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4337 return status;
a4fc5ed6
KP
4338}
4339
beb60608
CW
4340static void
4341intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4342{
df0e9248 4343 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4344 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4345 enum intel_display_power_domain power_domain;
a4fc5ed6 4346
beb60608
CW
4347 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4348 connector->base.id, connector->name);
4349 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4350
beb60608
CW
4351 if (connector->status != connector_status_connected)
4352 return;
671dedd2 4353
beb60608
CW
4354 power_domain = intel_dp_power_get(intel_dp);
4355
4356 intel_dp_set_edid(intel_dp);
4357
4358 intel_dp_power_put(intel_dp, power_domain);
4359
4360 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4361 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4362}
4363
4364static int intel_dp_get_modes(struct drm_connector *connector)
4365{
4366 struct intel_connector *intel_connector = to_intel_connector(connector);
4367 struct edid *edid;
4368
4369 edid = intel_connector->detect_edid;
4370 if (edid) {
4371 int ret = intel_connector_update_modes(connector, edid);
4372 if (ret)
4373 return ret;
4374 }
32f9d658 4375
f8779fda 4376 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4377 if (is_edp(intel_attached_dp(connector)) &&
4378 intel_connector->panel.fixed_mode) {
f8779fda 4379 struct drm_display_mode *mode;
beb60608
CW
4380
4381 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4382 intel_connector->panel.fixed_mode);
f8779fda 4383 if (mode) {
32f9d658
ZW
4384 drm_mode_probed_add(connector, mode);
4385 return 1;
4386 }
4387 }
beb60608 4388
32f9d658 4389 return 0;
a4fc5ed6
KP
4390}
4391
1aad7ac0
CW
4392static bool
4393intel_dp_detect_audio(struct drm_connector *connector)
4394{
1aad7ac0 4395 bool has_audio = false;
beb60608 4396 struct edid *edid;
1aad7ac0 4397
beb60608
CW
4398 edid = to_intel_connector(connector)->detect_edid;
4399 if (edid)
1aad7ac0 4400 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4401
1aad7ac0
CW
4402 return has_audio;
4403}
4404
f684960e
CW
4405static int
4406intel_dp_set_property(struct drm_connector *connector,
4407 struct drm_property *property,
4408 uint64_t val)
4409{
e953fd7b 4410 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4411 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4412 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4413 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4414 int ret;
4415
662595df 4416 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4417 if (ret)
4418 return ret;
4419
3f43c48d 4420 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4421 int i = val;
4422 bool has_audio;
4423
4424 if (i == intel_dp->force_audio)
f684960e
CW
4425 return 0;
4426
1aad7ac0 4427 intel_dp->force_audio = i;
f684960e 4428
c3e5f67b 4429 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4430 has_audio = intel_dp_detect_audio(connector);
4431 else
c3e5f67b 4432 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4433
4434 if (has_audio == intel_dp->has_audio)
f684960e
CW
4435 return 0;
4436
1aad7ac0 4437 intel_dp->has_audio = has_audio;
f684960e
CW
4438 goto done;
4439 }
4440
e953fd7b 4441 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4442 bool old_auto = intel_dp->color_range_auto;
4443 uint32_t old_range = intel_dp->color_range;
4444
55bc60db
VS
4445 switch (val) {
4446 case INTEL_BROADCAST_RGB_AUTO:
4447 intel_dp->color_range_auto = true;
4448 break;
4449 case INTEL_BROADCAST_RGB_FULL:
4450 intel_dp->color_range_auto = false;
4451 intel_dp->color_range = 0;
4452 break;
4453 case INTEL_BROADCAST_RGB_LIMITED:
4454 intel_dp->color_range_auto = false;
4455 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4456 break;
4457 default:
4458 return -EINVAL;
4459 }
ae4edb80
DV
4460
4461 if (old_auto == intel_dp->color_range_auto &&
4462 old_range == intel_dp->color_range)
4463 return 0;
4464
e953fd7b
CW
4465 goto done;
4466 }
4467
53b41837
YN
4468 if (is_edp(intel_dp) &&
4469 property == connector->dev->mode_config.scaling_mode_property) {
4470 if (val == DRM_MODE_SCALE_NONE) {
4471 DRM_DEBUG_KMS("DRM_MODE_SCALE_NONE not supported\n");
4472 return -EINVAL;
4473 }
4474
4475 if (intel_connector->panel.fitting_mode == val) {
4476 /* the eDP scaling property is not changed */
4477 return 0;
4478 }
4479 intel_connector->panel.fitting_mode = val;
4480
4481 goto done;
4482 }
4483
f684960e
CW
4484 return -EINVAL;
4485
4486done:
c0c36b94
CW
4487 if (intel_encoder->base.crtc)
4488 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4489
4490 return 0;
4491}
4492
a4fc5ed6 4493static void
73845adf 4494intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4495{
1d508706 4496 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4497
10e972d3 4498 kfree(intel_connector->detect_edid);
beb60608 4499
9cd300e0
JN
4500 if (!IS_ERR_OR_NULL(intel_connector->edid))
4501 kfree(intel_connector->edid);
4502
acd8db10
PZ
4503 /* Can't call is_edp() since the encoder may have been destroyed
4504 * already. */
4505 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4506 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4507
a4fc5ed6 4508 drm_connector_cleanup(connector);
55f78c43 4509 kfree(connector);
a4fc5ed6
KP
4510}
4511
00c09d70 4512void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4513{
da63a9f2
PZ
4514 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4515 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4516
4f71d0cb 4517 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4518 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4519 if (is_edp(intel_dp)) {
4520 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4521 /*
4522 * vdd might still be enabled due to the delayed vdd off.
4523 * Make sure vdd is actually turned off here.
4524 */
773538e8 4525 pps_lock(intel_dp);
4be73780 4526 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4527 pps_unlock(intel_dp);
4528
01527b31
CT
4529 if (intel_dp->edp_notifier.notifier_call) {
4530 unregister_reboot_notifier(&intel_dp->edp_notifier);
4531 intel_dp->edp_notifier.notifier_call = NULL;
4532 }
bd943159 4533 }
c8bd0e49 4534 drm_encoder_cleanup(encoder);
da63a9f2 4535 kfree(intel_dig_port);
24d05927
DV
4536}
4537
07f9cd0b
ID
4538static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4539{
4540 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4541
4542 if (!is_edp(intel_dp))
4543 return;
4544
951468f3
VS
4545 /*
4546 * vdd might still be enabled due to the delayed vdd off.
4547 * Make sure vdd is actually turned off here.
4548 */
afa4e53a 4549 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4550 pps_lock(intel_dp);
07f9cd0b 4551 edp_panel_vdd_off_sync(intel_dp);
773538e8 4552 pps_unlock(intel_dp);
07f9cd0b
ID
4553}
4554
49e6bc51
VS
4555static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4556{
4557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4558 struct drm_device *dev = intel_dig_port->base.base.dev;
4559 struct drm_i915_private *dev_priv = dev->dev_private;
4560 enum intel_display_power_domain power_domain;
4561
4562 lockdep_assert_held(&dev_priv->pps_mutex);
4563
4564 if (!edp_have_panel_vdd(intel_dp))
4565 return;
4566
4567 /*
4568 * The VDD bit needs a power domain reference, so if the bit is
4569 * already enabled when we boot or resume, grab this reference and
4570 * schedule a vdd off, so we don't hold on to the reference
4571 * indefinitely.
4572 */
4573 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4574 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4575 intel_display_power_get(dev_priv, power_domain);
4576
4577 edp_panel_vdd_schedule_off(intel_dp);
4578}
4579
6d93c0c4
ID
4580static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4581{
49e6bc51
VS
4582 struct intel_dp *intel_dp;
4583
4584 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4585 return;
4586
4587 intel_dp = enc_to_intel_dp(encoder);
4588
4589 pps_lock(intel_dp);
4590
4591 /*
4592 * Read out the current power sequencer assignment,
4593 * in case the BIOS did something with it.
4594 */
4595 if (IS_VALLEYVIEW(encoder->dev))
4596 vlv_initial_power_sequencer_setup(intel_dp);
4597
4598 intel_edp_panel_vdd_sanitize(intel_dp);
4599
4600 pps_unlock(intel_dp);
6d93c0c4
ID
4601}
4602
a4fc5ed6 4603static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4604 .dpms = intel_connector_dpms,
a4fc5ed6 4605 .detect = intel_dp_detect,
beb60608 4606 .force = intel_dp_force,
a4fc5ed6 4607 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4608 .set_property = intel_dp_set_property,
2545e4a6 4609 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4610 .destroy = intel_dp_connector_destroy,
c6f95f27 4611 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4612 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4613};
4614
4615static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4616 .get_modes = intel_dp_get_modes,
4617 .mode_valid = intel_dp_mode_valid,
df0e9248 4618 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4619};
4620
a4fc5ed6 4621static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4622 .reset = intel_dp_encoder_reset,
24d05927 4623 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4624};
4625
0e32b39c 4626void
21d40d37 4627intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4628{
0e32b39c 4629 return;
c8110e52 4630}
6207937d 4631
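/*
 * Handle a hot-plug pulse on the port. A long pulse re-reads the DPCD and
 * re-probes MST (long pulses on eDP are ignored, since VDD off can fake
 * them); a short pulse services MST ESIs or re-checks the SST link status.
 * If an MST device has disappeared we fall back out of MST mode.
 */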
b2c5c181 4632enum irqreturn
13cf5504
DA
4633intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4634{
4635 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4636 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4637 struct drm_device *dev = intel_dig_port->base.base.dev;
4638 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4639 enum intel_display_power_domain power_domain;
b2c5c181 4640 enum irqreturn ret = IRQ_NONE;
1c767b33 4641
0e32b39c
DA
4642 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4643 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4644
7a7f84cc
VS
4645 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4646 /*
4647 * vdd off can generate a long pulse on eDP which
4648 * would require vdd on to handle it, and thus we
4649 * would end up in an endless cycle of
4650 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4651 */
4652 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4653 port_name(intel_dig_port->port));
a8b3d52f 4654 return IRQ_HANDLED;
7a7f84cc
VS
4655 }
4656
26fbb774
VS
4657 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4658 port_name(intel_dig_port->port),
0e32b39c 4659 long_hpd ? "long" : "short");
13cf5504 4660
1c767b33
ID
4661 power_domain = intel_display_port_power_domain(intel_encoder);
4662 intel_display_power_get(dev_priv, power_domain);
4663
0e32b39c 4664 if (long_hpd) {
2a592bec
DA
4665
4666 if (HAS_PCH_SPLIT(dev)) {
4667 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4668 goto mst_fail;
4669 } else {
4670 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4671 goto mst_fail;
4672 }
0e32b39c
DA
4673
4674 if (!intel_dp_get_dpcd(intel_dp)) {
4675 goto mst_fail;
4676 }
4677
4678 intel_dp_probe_oui(intel_dp);
4679
4680 if (!intel_dp_probe_mst(intel_dp))
4681 goto mst_fail;
4682
4683 } else {
4684 if (intel_dp->is_mst) {
1c767b33 4685 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4686 goto mst_fail;
4687 }
4688
4689 if (!intel_dp->is_mst) {
4690 /*
4691 * we'll check the link status via the normal hot plug path later -
4692 * but for short hpds we should check it now
4693 */
5b215bcf 4694 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4695 intel_dp_check_link_status(intel_dp);
5b215bcf 4696 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4697 }
4698 }
b2c5c181
DV
4699
4700 ret = IRQ_HANDLED;
4701
1c767b33 4702 goto put_power;
0e32b39c
DA
4703mst_fail:
4704 /* if we were in MST mode, and device is not there get out of MST mode */
4705 if (intel_dp->is_mst) {
4706 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4707 intel_dp->is_mst = false;
4708 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4709 }
1c767b33
ID
4710put_power:
4711 intel_display_power_put(dev_priv, power_domain);
4712
4713 return ret;
13cf5504
DA
4714}
4715
e3421a18
ZW
4716/* Return which DP Port should be selected for Transcoder DP control */
4717int
0206e353 4718intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4719{
4720 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4721 struct intel_encoder *intel_encoder;
4722 struct intel_dp *intel_dp;
e3421a18 4723
fa90ecef
PZ
4724 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4725 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4726
fa90ecef
PZ
4727 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4728 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4729 return intel_dp->output_reg;
e3421a18 4730 }
ea5b213a 4731
e3421a18
ZW
4732 return -1;
4733}
4734
36e83a18 4735/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4736bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4737{
4738 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4739 union child_device_config *p_child;
36e83a18 4740 int i;
5d8a7752
VS
4741 static const short port_mapping[] = {
4742 [PORT_B] = PORT_IDPB,
4743 [PORT_C] = PORT_IDPC,
4744 [PORT_D] = PORT_IDPD,
4745 };
36e83a18 4746
3b32a35b
VS
4747 if (port == PORT_A)
4748 return true;
4749
41aa3448 4750 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4751 return false;
4752
41aa3448
RV
4753 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4754 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4755
5d8a7752 4756 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4757 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4758 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4759 return true;
4760 }
4761 return false;
4762}
4763
0e32b39c 4764void
f684960e
CW
4765intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4766{
53b41837
YN
4767 struct intel_connector *intel_connector = to_intel_connector(connector);
4768
3f43c48d 4769 intel_attach_force_audio_property(connector);
e953fd7b 4770 intel_attach_broadcast_rgb_property(connector);
55bc60db 4771 intel_dp->color_range_auto = true;
53b41837
YN
4772
4773 if (is_edp(intel_dp)) {
4774 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4775 drm_object_attach_property(
4776 &connector->base,
53b41837 4777 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4778 DRM_MODE_SCALE_ASPECT);
4779 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4780 }
f684960e
CW
4781}
4782
dada1a9f
ID
4783static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4784{
4785 intel_dp->last_power_cycle = jiffies;
4786 intel_dp->last_power_on = jiffies;
4787 intel_dp->last_backlight_off = jiffies;
4788}
4789
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}

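/*
 * Illustrative sketch (kept out of the build): the assign_final()/get_delay()
 * logic above written out long-hand. Values are in the hardware's 100us
 * units; a delay that is zero in both the registers and the VBT falls back
 * to the eDP spec limit, and get_delay() then rounds up to milliseconds.
 * pps_merge_delay() is a hypothetical helper, not part of the driver.
 */
#if 0
static u16 pps_merge_delay(u16 cur, u16 vbt, u16 spec)
{
	u16 hw = max(cur, vbt);

	/* e.g. cur = 0, vbt = 0, spec = 2100 -> 2100 (100us units, i.e. 210 ms) */
	return hw == 0 ? spec : hw;
}
#endif
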
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
		   << PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}

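/*
 * Illustrative note (kept out of the build): the power cycle delay field
 * written above is in 100ms units, while seq->t11_t12 is kept in 100us
 * units, hence the DIV_ROUND_UP(..., 1000). For example the spec fallback
 * of 6100 (610 ms) becomes a field value of 7, i.e. 700 ms programmed.
 */
#if 0
	u32 cycle_field = DIV_ROUND_UP(6100, 1000);	/* == 7 */
#endif
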
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}

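/*
 * Illustrative sketch (kept out of the build): a media-playback style
 * caller requesting a specific refresh rate. As the kerneldoc above notes,
 * dev_priv->drrs must be locked around the call. example_set_media_rr()
 * is a hypothetical wrapper, not part of the driver.
 */
#if 0
static void example_set_media_rr(struct drm_i915_private *dev_priv,
				 int refresh_rate)
{
	mutex_lock(&dev_priv->drrs.mutex);
	intel_dp_set_drrs_state(dev_priv->dev, refresh_rate);
	mutex_unlock(&dev_priv->drrs.mutex);
}
#endif
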
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

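/*
 * Illustrative sketch (kept out of the build): DRRS is bracketed by the
 * eDP encoder enable/disable paths, enabling it once the pipe is running
 * and tearing it down before the pipe goes away. The example_* names are
 * hypothetical; only the intel_edp_drrs_enable/_disable calls are real.
 */
#if 0
static void example_edp_post_enable(struct intel_dp *intel_dp)
{
	/* pipe and port are fully up at this point */
	intel_edp_drrs_enable(intel_dp);
}

static void example_edp_pre_disable(struct intel_dp *intel_dp)
{
	/* forces the panel back to the fixed (high) refresh rate first */
	intel_edp_drrs_disable(intel_dp);
}
#endif
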
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Invalidate DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is a disturbance on screen (due to cursor movement/time
 * update etc), DRRS needs to be invalidated, i.e. need to switch to
 * high RR.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
			       unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		intel_dp_set_drrs_state(dev_priv->dev,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Flush DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * When there is no movement on screen, DRRS work can be scheduled.
 * This DRRS work is responsible for setting relevant registers after a
 * timeout of 1 second.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
	    !dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * eDP DRRS:-
 * The implementation is based on frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to low RR is made.
 * For integration with frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

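/*
 * Illustrative sketch (kept out of the build): how a frontbuffer tracking
 * caller drives the two entry points named above. A write to the
 * frontbuffer first invalidates DRRS (back to high RR); once the update is
 * finished, the flush lets the 1 second downclock timer rearm. The
 * example_* function name is hypothetical.
 */
#if 0
static void example_frontbuffer_write(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... CPU writes or a page flip touch the frontbuffer here ... */

	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
#endif
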
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
			intel_dp_mst_encoder_init(intel_dig_port,
						  intel_connector->base.base.id);
		}
	}

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hpd_irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
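/*
 * Illustrative sketch (kept out of the build): output setup code registers
 * a DP port by passing its control register and port enum, typically after
 * checking the detect bit, e.g. something along these lines (DP_B and
 * DP_DETECTED are assumed from i915_reg.h):
 */
#if 0
	if (I915_READ(DP_B) & DP_DETECTED)
		intel_dp_init(dev, DP_B, PORT_B);
#endif
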
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
		}
	}
}
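/*
 * Illustrative sketch (kept out of the build): the driver's system
 * suspend/resume paths call these helpers so MST topologies are quiesced
 * before the display goes down and re-probed once it is back. The
 * example_* names are hypothetical.
 */
#if 0
static int example_display_suspend(struct drm_device *dev)
{
	intel_dp_mst_suspend(dev);
	/* ... disable crtcs, save display state ... */
	return 0;
}

static int example_display_resume(struct drm_device *dev)
{
	/* ... restore display state, re-enable crtcs ... */
	intel_dp_mst_resume(dev);
	return 0;
}
#endif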