drm/i915/chv: Remove DPIO force latency causing interpair skew issue
[linux-2.6-block.git] / drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * The table below provides only the fixed rates, excluding the
 * variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional divider for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
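
/*
 * Worked example (illustrative, not part of the original table): decoding
 * the m2 value of the 1.62 GHz entry above with the fixed point formula:
 *   m2_int      = 0x819999a >> 22          = 32
 *   m2_fraction = 0x819999a & ((1 << 22) - 1) = 1677722
 * so the effective m2 is roughly 32 + 1677722 / 2^22, i.e. about 32.4.
 */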
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

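/*
 * Worked example of the bandwidth check above, with hypothetical numbers:
 * a 148500 kHz mode checked at 18bpp needs
 *   intel_dp_link_required(148500, 18) = (148500 * 18 + 9) / 10 = 267300
 * decakilobits, while two lanes at 270000 provide
 *   intel_dp_max_data_rate(270000, 2) = 270000 * 2 * 8 / 10 = 432000,
 * so such a mode would pass the MODE_CLOCK_HIGH check.
 */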
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

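/*
 * Illustrative sketch of the byte order used by the two helpers above:
 * intel_dp_pack_aux() puts src[0] in the most significant byte, so a
 * hypothetical buffer { 0x12, 0x34, 0x56 } packs to 0x12345600, and
 * intel_dp_unpack_aux(0x12345600, dst, 3) restores the same three bytes.
 */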
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power so that the T12 timing
 * is guaranteed. This function is only applicable when the panel PM state
 * is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and we'd like the AUX
	 * channel to run at 2MHz.  So take the hrawclk value, divide by 2, and
	 * use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

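/*
 * Illustrative numbers for the divider above: on VLV, where intel_hrawclk()
 * returns 200 (MHz), the first divider tried is 200 / 2 = 100, matching the
 * desired 2MHz AUX clock; non-zero indices return 0 to signal that there are
 * no further dividers to try.
 */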
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
	   results in a CDCLK change. The CDCLK change needs to be handled by
	   disabling pipes and re-enabling them. */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

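/*
 * Sketch of how the merge above behaves, with hypothetical inputs: both
 * lists are sorted in ascending order, so for
 *   source = { 162000, 270000, 540000 } and sink = { 162000, 216000, 270000 }
 * the two indices advance in lockstep and common_rates ends up holding
 * { 162000, 270000 } with k = 2.
 */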
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%d,", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

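/*
 * Explanatory note on the lookup above (a reading of the code, not an
 * addition to it): rates[] is zero-initialized and intel_dp_common_rates()
 * fills its first len entries in ascending order, so rate_to_index(0, rates)
 * finds the first zero slot, i.e. len, and rates[len - 1] is therefore the
 * highest common rate.
 */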
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

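/*
 * A rough reading of the search loop above, for illustration: bpp is walked
 * downwards from the pipe bpp in steps of one bit per channel (2*3), and for
 * each bpp every common link rate and then every lane count (1, 2, 4) is
 * tried, so the first configuration found keeps the highest colour depth
 * that still fits the link. For eDP the min/max clamps collapse the clock
 * and lane loops to the panel's advertised maximums.
 */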
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

ffd6749d
PZ
1582#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1583#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1584
1a5ef5b7
PZ
1585#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1586#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1587
ffd6749d
PZ
1588#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1589#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1590
4be73780 1591static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1592 u32 mask,
1593 u32 value)
bd943159 1594{
30add22d 1595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1596 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1597 u32 pp_stat_reg, pp_ctrl_reg;
1598
e39b999a
VS
1599 lockdep_assert_held(&dev_priv->pps_mutex);
1600
bf13e81b
JN
1601 pp_stat_reg = _pp_stat_reg(intel_dp);
1602 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1603
99ea7127 1604 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1605 mask, value,
1606 I915_READ(pp_stat_reg),
1607 I915_READ(pp_ctrl_reg));
32ce697c 1608
453c5420 1609 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1610 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1611 I915_READ(pp_stat_reg),
1612 I915_READ(pp_ctrl_reg));
32ce697c 1613 }
54c136d4
CW
1614
1615 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1616}
32ce697c 1617
4be73780 1618static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1619{
1620 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1621 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1622}
1623
4be73780 1624static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1625{
1626 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1627 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1628}
1629
4be73780 1630static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1631{
1632 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1633
1634 /* When we disable the VDD override bit last we have to do the manual
1635 * wait. */
1636 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1637 intel_dp->panel_power_cycle_delay);
1638
4be73780 1639 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1640}
1641
4be73780 1642static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1643{
1644 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1645 intel_dp->backlight_on_delay);
1646}
1647
4be73780 1648static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1649{
1650 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1651 intel_dp->backlight_off_delay);
1652}
99ea7127 1653
832dd3c1
KP
1654/* Read the current pp_control value, unlocking the register if it
1655 * is locked
1656 */
1657
453c5420 1658static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1659{
453c5420
JB
1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662 u32 control;
832dd3c1 1663
e39b999a
VS
1664 lockdep_assert_held(&dev_priv->pps_mutex);
1665
bf13e81b 1666 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1667 control &= ~PANEL_UNLOCK_MASK;
1668 control |= PANEL_UNLOCK_REGS;
1669 return control;
bd943159
KP
1670}
1671
951468f3
VS
1672/*
1673 * Must be paired with edp_panel_vdd_off().
1674 * Must hold pps_mutex around the whole on/off sequence.
1675 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 */
1e0560e0 1677static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1678{
30add22d 1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1682 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1683 enum intel_display_power_domain power_domain;
5d613501 1684 u32 pp;
453c5420 1685 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1686 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1687
e39b999a
VS
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
97af61f5 1690 if (!is_edp(intel_dp))
adddaaf4 1691 return false;
bd943159 1692
2c623c11 1693 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1694 intel_dp->want_panel_vdd = true;
99ea7127 1695
4be73780 1696 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1697 return need_to_disable;
b0665d57 1698
4e6e1a54
ID
1699 power_domain = intel_display_port_power_domain(intel_encoder);
1700 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1701
3936fcf4
VS
1702 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703 port_name(intel_dig_port->port));
bd943159 1704
4be73780
DV
1705 if (!edp_have_panel_power(intel_dp))
1706 wait_panel_power_cycle(intel_dp);
99ea7127 1707
453c5420 1708 pp = ironlake_get_pp_control(intel_dp);
5d613501 1709 pp |= EDP_FORCE_VDD;
ebf33b18 1710
bf13e81b
JN
1711 pp_stat_reg = _pp_stat_reg(intel_dp);
1712 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1713
1714 I915_WRITE(pp_ctrl_reg, pp);
1715 POSTING_READ(pp_ctrl_reg);
1716 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1718 /*
1719 * If the panel wasn't on, delay before accessing aux channel
1720 */
4be73780 1721 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1722 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723 port_name(intel_dig_port->port));
f01eca2e 1724 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1725 }
adddaaf4
JN
1726
1727 return need_to_disable;
1728}
1729
951468f3
VS
1730/*
1731 * Must be paired with intel_edp_panel_vdd_off() or
1732 * intel_edp_panel_off().
1733 * Nested calls to these functions are not allowed since
1734 * we drop the lock. Caller must use some higher level
1735 * locking to prevent nested calls from other threads.
1736 */
b80d6c78 1737void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1738{
c695b6b6 1739 bool vdd;
adddaaf4 1740
c695b6b6
VS
1741 if (!is_edp(intel_dp))
1742 return;
1743
773538e8 1744 pps_lock(intel_dp);
c695b6b6 1745 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1746 pps_unlock(intel_dp);
c695b6b6 1747
e2c719b7 1748 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1749 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1750}
1751
4be73780 1752static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1753{
30add22d 1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1755 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1756 struct intel_digital_port *intel_dig_port =
1757 dp_to_dig_port(intel_dp);
1758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1759 enum intel_display_power_domain power_domain;
5d613501 1760 u32 pp;
453c5420 1761 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1762
e39b999a 1763 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1764
15e899a0 1765 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1766
15e899a0 1767 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1768 return;
b0665d57 1769
3936fcf4
VS
1770 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1771 port_name(intel_dig_port->port));
bd943159 1772
be2c9196
VS
1773 pp = ironlake_get_pp_control(intel_dp);
1774 pp &= ~EDP_FORCE_VDD;
453c5420 1775
be2c9196
VS
1776 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1778
be2c9196
VS
1779 I915_WRITE(pp_ctrl_reg, pp);
1780 POSTING_READ(pp_ctrl_reg);
90791a5c 1781
be2c9196
VS
1782 /* Make sure sequencer is idle before allowing subsequent activity */
1783 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1784 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1785
be2c9196
VS
1786 if ((pp & POWER_TARGET_ON) == 0)
1787 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1788
be2c9196
VS
1789 power_domain = intel_display_port_power_domain(intel_encoder);
1790 intel_display_power_put(dev_priv, power_domain);
bd943159 1791}
5d613501 1792
4be73780 1793static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1794{
1795 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1796 struct intel_dp, panel_vdd_work);
bd943159 1797
773538e8 1798 pps_lock(intel_dp);
15e899a0
VS
1799 if (!intel_dp->want_panel_vdd)
1800 edp_panel_vdd_off_sync(intel_dp);
773538e8 1801 pps_unlock(intel_dp);
bd943159
KP
1802}
1803
aba86890
ID
1804static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805{
1806 unsigned long delay;
1807
1808 /*
1809 * Queue the timer to fire a long time from now (relative to the power
1810 * down delay) to keep the panel power up across a sequence of
1811 * operations.
1812 */
1813 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815}
1816
951468f3
VS
1817/*
1818 * Must be paired with edp_panel_vdd_on().
1819 * Must hold pps_mutex around the whole on/off sequence.
1820 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 */
4be73780 1822static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1823{
e39b999a
VS
1824 struct drm_i915_private *dev_priv =
1825 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
97af61f5
KP
1829 if (!is_edp(intel_dp))
1830 return;
5d613501 1831
e2c719b7 1832 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1833 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1834
bd943159
KP
1835 intel_dp->want_panel_vdd = false;
1836
aba86890 1837 if (sync)
4be73780 1838 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1839 else
1840 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1841}
1842
9f0fb5be 1843static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1844{
30add22d 1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1846 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1847 u32 pp;
453c5420 1848 u32 pp_ctrl_reg;
9934c132 1849
9f0fb5be
VS
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
97af61f5 1852 if (!is_edp(intel_dp))
bd943159 1853 return;
99ea7127 1854
3936fcf4
VS
1855 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1856 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1857
e7a89ace
VS
1858 if (WARN(edp_have_panel_power(intel_dp),
1859 "eDP port %c panel power already on\n",
1860 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1861 return;
9934c132 1862
4be73780 1863 wait_panel_power_cycle(intel_dp);
37c6c9b0 1864
bf13e81b 1865 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1866 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1867 if (IS_GEN5(dev)) {
1868 /* ILK workaround: disable reset around power sequence */
1869 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
05ce1a49 1872 }
37c6c9b0 1873
1c0ae80a 1874 pp |= POWER_TARGET_ON;
99ea7127
KP
1875 if (!IS_GEN5(dev))
1876 pp |= PANEL_POWER_RESET;
1877
453c5420
JB
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
9934c132 1880
4be73780 1881 wait_panel_on(intel_dp);
dce56b3c 1882 intel_dp->last_power_on = jiffies;
9934c132 1883
05ce1a49
KP
1884 if (IS_GEN5(dev)) {
1885 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1886 I915_WRITE(pp_ctrl_reg, pp);
1887 POSTING_READ(pp_ctrl_reg);
05ce1a49 1888 }
9f0fb5be 1889}
e39b999a 1890
9f0fb5be
VS
1891void intel_edp_panel_on(struct intel_dp *intel_dp)
1892{
1893 if (!is_edp(intel_dp))
1894 return;
1895
1896 pps_lock(intel_dp);
1897 edp_panel_on(intel_dp);
773538e8 1898 pps_unlock(intel_dp);
9934c132
JB
1899}
1900
9f0fb5be
VS
1901
1902static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1903{
4e6e1a54
ID
1904 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1905 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1906 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1907 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1908 enum intel_display_power_domain power_domain;
99ea7127 1909 u32 pp;
453c5420 1910 u32 pp_ctrl_reg;
9934c132 1911
9f0fb5be
VS
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
97af61f5
KP
1914 if (!is_edp(intel_dp))
1915 return;
37c6c9b0 1916
3936fcf4
VS
1917 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1918 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1919
3936fcf4
VS
1920 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1921 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1922
453c5420 1923 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1924	/* We need to switch off panel power _and_ force vdd, because otherwise some
1925	 * panels get very unhappy and cease to work. */
b3064154
PJ
1926 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1927 EDP_BLC_ENABLE);
453c5420 1928
bf13e81b 1929 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1930
849e39f5
PZ
1931 intel_dp->want_panel_vdd = false;
1932
453c5420
JB
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
9934c132 1935
dce56b3c 1936 intel_dp->last_power_cycle = jiffies;
4be73780 1937 wait_panel_off(intel_dp);
849e39f5
PZ
1938
1939 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1940 power_domain = intel_display_port_power_domain(intel_encoder);
1941 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1942}
e39b999a 1943
9f0fb5be
VS
1944void intel_edp_panel_off(struct intel_dp *intel_dp)
1945{
1946 if (!is_edp(intel_dp))
1947 return;
e39b999a 1948
9f0fb5be
VS
1949 pps_lock(intel_dp);
1950 edp_panel_off(intel_dp);
773538e8 1951 pps_unlock(intel_dp);
9934c132
JB
1952}
1953
1250d107
JN
1954/* Enable backlight in the panel power control. */
1955static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1956{
da63a9f2
PZ
1957 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 pp;
453c5420 1961 u32 pp_ctrl_reg;
32f9d658 1962
01cb9ea6
JB
1963 /*
1964 * If we enable the backlight right away following a panel power
1965 * on, we may see slight flicker as the panel syncs with the eDP
1966 * link. So delay a bit to make sure the image is solid before
1967 * allowing it to appear.
1968 */
4be73780 1969 wait_backlight_on(intel_dp);
e39b999a 1970
773538e8 1971 pps_lock(intel_dp);
e39b999a 1972
453c5420 1973 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1974 pp |= EDP_BLC_ENABLE;
453c5420 1975
bf13e81b 1976 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
e39b999a 1980
773538e8 1981 pps_unlock(intel_dp);
32f9d658
ZW
1982}
1983
1250d107
JN
1984/* Enable backlight PWM and backlight PP control. */
1985void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986{
1987 if (!is_edp(intel_dp))
1988 return;
1989
1990 DRM_DEBUG_KMS("\n");
1991
1992 intel_panel_enable_backlight(intel_dp->attached_connector);
1993 _intel_edp_backlight_on(intel_dp);
1994}
1995
1996/* Disable backlight in the panel power control. */
1997static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1998{
30add22d 1999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 pp;
453c5420 2002 u32 pp_ctrl_reg;
32f9d658 2003
f01eca2e
KP
2004 if (!is_edp(intel_dp))
2005 return;
2006
773538e8 2007 pps_lock(intel_dp);
e39b999a 2008
453c5420 2009 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2010 pp &= ~EDP_BLC_ENABLE;
453c5420 2011
bf13e81b 2012 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2013
2014 I915_WRITE(pp_ctrl_reg, pp);
2015 POSTING_READ(pp_ctrl_reg);
f7d2323c 2016
773538e8 2017 pps_unlock(intel_dp);
e39b999a
VS
2018
2019 intel_dp->last_backlight_off = jiffies;
f7d2323c 2020 edp_wait_backlight_off(intel_dp);
1250d107 2021}
f7d2323c 2022
1250d107
JN
2023/* Disable backlight PP control and backlight PWM. */
2024void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025{
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 DRM_DEBUG_KMS("\n");
f7d2323c 2030
1250d107 2031 _intel_edp_backlight_off(intel_dp);
f7d2323c 2032 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2033}
a4fc5ed6 2034
73580fb7
JN
2035/*
2036 * Hook for controlling the panel power control backlight through the bl_power
2037 * sysfs attribute. Take care to handle multiple calls.
2038 */
2039static void intel_edp_backlight_power(struct intel_connector *connector,
2040 bool enable)
2041{
2042 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2043 bool is_enabled;
2044
773538e8 2045 pps_lock(intel_dp);
e39b999a 2046 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2047 pps_unlock(intel_dp);
73580fb7
JN
2048
2049 if (is_enabled == enable)
2050 return;
2051
23ba9373
JN
2052 DRM_DEBUG_KMS("panel power control backlight %s\n",
2053 enable ? "enable" : "disable");
73580fb7
JN
2054
2055 if (enable)
2056 _intel_edp_backlight_on(intel_dp);
2057 else
2058 _intel_edp_backlight_off(intel_dp);
2059}
2060
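/*
 * The eDP PLL for port A lives behind the DP_PLL_ENABLE bit in DP_A and may
 * only be toggled while the pipe is disabled, hence the
 * assert_pipe_disabled() checks below.
 */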
2bd2ad64 2061static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2062{
da63a9f2
PZ
2063 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2065 struct drm_device *dev = crtc->dev;
d240f20f
JB
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2067 u32 dpa_ctl;
2068
2bd2ad64
DV
2069 assert_pipe_disabled(dev_priv,
2070 to_intel_crtc(crtc)->pipe);
2071
d240f20f
JB
2072 DRM_DEBUG_KMS("\n");
2073 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2074 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2075 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076
2077 /* We don't adjust intel_dp->DP while tearing down the link, to
2078 * facilitate link retraining (e.g. after hotplug). Hence clear all
2079 * enable bits here to ensure that we don't enable too much. */
2080 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2081 intel_dp->DP |= DP_PLL_ENABLE;
2082 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2083 POSTING_READ(DP_A);
2084 udelay(200);
d240f20f
JB
2085}
2086
2bd2ad64 2087static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2088{
da63a9f2
PZ
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
d240f20f
JB
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2bd2ad64
DV
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
d240f20f 2098 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2099 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2100 "dp pll off, should be on\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We can't rely on the value tracked for the DP register in
2104 * intel_dp->DP because link_down must not change that (otherwise link
2105	 * re-training will fail). */
298b0b39 2106 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2107 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2108 POSTING_READ(DP_A);
d240f20f
JB
2109 udelay(200);
2110}
2111
c7ad3810 2112/* If the sink supports it, try to set the power state appropriately */
c19b0669 2113void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2114{
2115 int ret, i;
2116
2117 /* Should have a valid DPCD by this point */
2118 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119 return;
2120
2121 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2122 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123 DP_SET_POWER_D3);
c7ad3810
JB
2124 } else {
2125 /*
2126 * When turning on, we need to retry for 1ms to give the sink
2127 * time to wake up.
2128 */
2129 for (i = 0; i < 3; i++) {
9d1a1031
JN
2130 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131 DP_SET_POWER_D0);
c7ad3810
JB
2132 if (ret == 1)
2133 break;
2134 msleep(1);
2135 }
2136 }
f9cac721
JN
2137
2138 if (ret != 1)
2139 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2141}
2142
19d8fe15
DV
2143static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2144 enum pipe *pipe)
d240f20f 2145{
19d8fe15 2146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2147 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2148 struct drm_device *dev = encoder->base.dev;
2149 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2150 enum intel_display_power_domain power_domain;
2151 u32 tmp;
2152
2153 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2154 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2155 return false;
2156
2157 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2158
2159 if (!(tmp & DP_PORT_EN))
2160 return false;
2161
bc7d38a4 2162 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2163 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2164 } else if (IS_CHERRYVIEW(dev)) {
2165 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2166 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2167 *pipe = PORT_TO_PIPE(tmp);
2168 } else {
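 /*
  * On CPT the pipe isn't encoded in the port register itself; scan each
  * transcoder's TRANS_DP_CTL port select field to find which pipe drives
  * this port.
  */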
2169 u32 trans_sel;
2170 u32 trans_dp;
2171 int i;
2172
2173 switch (intel_dp->output_reg) {
2174 case PCH_DP_B:
2175 trans_sel = TRANS_DP_PORT_SEL_B;
2176 break;
2177 case PCH_DP_C:
2178 trans_sel = TRANS_DP_PORT_SEL_C;
2179 break;
2180 case PCH_DP_D:
2181 trans_sel = TRANS_DP_PORT_SEL_D;
2182 break;
2183 default:
2184 return true;
2185 }
2186
055e393f 2187 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2188 trans_dp = I915_READ(TRANS_DP_CTL(i));
2189 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2190 *pipe = i;
2191 return true;
2192 }
2193 }
19d8fe15 2194
4a0833ec
DV
2195 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2196 intel_dp->output_reg);
2197 }
d240f20f 2198
19d8fe15
DV
2199 return true;
2200}
d240f20f 2201
045ac3b5 2202static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2203 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2204{
2205 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2206 u32 tmp, flags = 0;
63000ef6
XZ
2207 struct drm_device *dev = encoder->base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 enum port port = dp_to_dig_port(intel_dp)->port;
2210 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2211 int dotclock;
045ac3b5 2212
9ed109a7
DV
2213 tmp = I915_READ(intel_dp->output_reg);
2214 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2215 pipe_config->has_audio = true;
2216
63000ef6 2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2218 if (tmp & DP_SYNC_HS_HIGH)
2219 flags |= DRM_MODE_FLAG_PHSYNC;
2220 else
2221 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2222
63000ef6
XZ
2223 if (tmp & DP_SYNC_VS_HIGH)
2224 flags |= DRM_MODE_FLAG_PVSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NVSYNC;
2227 } else {
2228 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2229 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2230 flags |= DRM_MODE_FLAG_PHSYNC;
2231 else
2232 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2233
63000ef6
XZ
2234 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2235 flags |= DRM_MODE_FLAG_PVSYNC;
2236 else
2237 flags |= DRM_MODE_FLAG_NVSYNC;
2238 }
045ac3b5 2239
2d112de7 2240 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2241
8c875fca
VS
2242 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2243 tmp & DP_COLOR_RANGE_16_235)
2244 pipe_config->limited_color_range = true;
2245
eb14cb74
VS
2246 pipe_config->has_dp_encoder = true;
2247
2248 intel_dp_get_m_n(crtc, pipe_config);
2249
18442d08 2250 if (port == PORT_A) {
f1f644dc
JB
2251 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2252 pipe_config->port_clock = 162000;
2253 else
2254 pipe_config->port_clock = 270000;
2255 }
18442d08
VS
2256
2257 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2258 &pipe_config->dp_m_n);
2259
2260 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2261 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262
2d112de7 2263 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2264
c6cd2ee2
JN
2265 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2266 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 /*
2268 * This is a big fat ugly hack.
2269 *
2270 * Some machines in UEFI boot mode provide us a VBT that has 18
2271 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2272 * unknown we fail to light up. Yet the same BIOS boots up with
2273 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2274 * max, not what it tells us to use.
2275 *
2276 * Note: This will still be broken if the eDP panel is not lit
2277 * up by the BIOS, and thus we can't get the mode at module
2278 * load.
2279 */
2280 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2281 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2282 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2283 }
045ac3b5
JB
2284}
2285
e8cb4558 2286static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2287{
e8cb4558 2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2289 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2290 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291
6e3c9717 2292 if (crtc->config->has_audio)
495a5bb8 2293 intel_audio_codec_disable(encoder);
6cb49835 2294
b32c6f48
RV
2295 if (HAS_PSR(dev) && !HAS_DDI(dev))
2296 intel_psr_disable(intel_dp);
2297
6cb49835
DV
2298 /* Make sure the panel is off before trying to change the mode. But also
2299 * ensure that we have vdd while we switch off the panel. */
24f3e092 2300 intel_edp_panel_vdd_on(intel_dp);
4be73780 2301 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2302 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2303 intel_edp_panel_off(intel_dp);
3739850b 2304
08aff3fe
VS
2305 /* disable the port before the pipe on g4x */
2306 if (INTEL_INFO(dev)->gen < 5)
3739850b 2307 intel_dp_link_down(intel_dp);
d240f20f
JB
2308}
2309
08aff3fe 2310static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2311{
2bd2ad64 2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2313 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2314
49277c31 2315 intel_dp_link_down(intel_dp);
08aff3fe
VS
2316 if (port == PORT_A)
2317 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2318}
2319
2320static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321{
2322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323
2324 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2325}
2326
580d3811
VS
2327static void chv_post_disable_dp(struct intel_encoder *encoder)
2328{
2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2330 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2331 struct drm_device *dev = encoder->base.dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc =
2334 to_intel_crtc(encoder->base.crtc);
2335 enum dpio_channel ch = vlv_dport_to_channel(dport);
2336 enum pipe pipe = intel_crtc->pipe;
2337 u32 val;
2338
2339 intel_dp_link_down(intel_dp);
2340
2341 mutex_lock(&dev_priv->dpio_lock);
2342
2343 /* Propagate soft reset to data lane reset */
97fd4d5c 2344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2345 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2346 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2347
97fd4d5c
VS
2348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2349 val |= CHV_PCS_REQ_SOFTRESET_EN;
2350 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351
2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2353 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2357 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2359
2360 mutex_unlock(&dev_priv->dpio_lock);
2361}
2362
7b13b58a
VS
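/*
 * Translate a DPCD training pattern (plus the scrambling-disable flag) into
 * the platform-specific register bits. On DDI platforms DP_TP_CTL is written
 * directly; on the CPT and gen4/VLV/CHV paths only *DP is updated and the
 * caller writes the port register.
 */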
2363static void
2364_intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint32_t *DP,
2366 uint8_t dp_train_pat)
2367{
2368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2369 struct drm_device *dev = intel_dig_port->base.base.dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 enum port port = intel_dig_port->port;
2372
2373 if (HAS_DDI(dev)) {
2374 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375
2376 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2377 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 else
2379 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380
2381 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2382 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2383 case DP_TRAINING_PATTERN_DISABLE:
2384 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2385
2386 break;
2387 case DP_TRAINING_PATTERN_1:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 break;
2390 case DP_TRAINING_PATTERN_2:
2391 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 break;
2393 case DP_TRAINING_PATTERN_3:
2394 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2395 break;
2396 }
2397 I915_WRITE(DP_TP_CTL(port), temp);
2398
2399 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2400 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401
2402 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2403 case DP_TRAINING_PATTERN_DISABLE:
2404 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 DRM_ERROR("DP training pattern 3 not supported\n");
2414 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2415 break;
2416 }
2417
2418 } else {
2419 if (IS_CHERRYVIEW(dev))
2420 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 else
2422 *DP &= ~DP_LINK_TRAIN_MASK;
2423
2424 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2425 case DP_TRAINING_PATTERN_DISABLE:
2426 *DP |= DP_LINK_TRAIN_OFF;
2427 break;
2428 case DP_TRAINING_PATTERN_1:
2429 *DP |= DP_LINK_TRAIN_PAT_1;
2430 break;
2431 case DP_TRAINING_PATTERN_2:
2432 *DP |= DP_LINK_TRAIN_PAT_2;
2433 break;
2434 case DP_TRAINING_PATTERN_3:
2435 if (IS_CHERRYVIEW(dev)) {
2436 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 } else {
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2;
2440 }
2441 break;
2442 }
2443 }
2444}
2445
2446static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447{
2448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450
7b13b58a
VS
2451 /* enable with pattern 1 (as per spec) */
2452 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2453 DP_TRAINING_PATTERN_1);
2454
2455 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2456 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2457
2458 /*
2459 * Magic for VLV/CHV. We _must_ first set up the register
2460 * without actually enabling the port, and then do another
2461 * write to enable the port. Otherwise link training will
2462 * fail when the power sequencer is freshly used for this port.
2463 */
2464 intel_dp->DP |= DP_PORT_EN;
2465
2466 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2467 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2468}
2469
e8cb4558 2470static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2471{
e8cb4558
DV
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2475 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2476 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2477
0c33d8d7
DV
2478 if (WARN_ON(dp_reg & DP_PORT_EN))
2479 return;
5d613501 2480
093e3f13
VS
2481 pps_lock(intel_dp);
2482
2483 if (IS_VALLEYVIEW(dev))
2484 vlv_init_panel_power_sequencer(intel_dp);
2485
7b13b58a 2486 intel_dp_enable_port(intel_dp);
093e3f13
VS
2487
2488 edp_panel_vdd_on(intel_dp);
2489 edp_panel_on(intel_dp);
2490 edp_panel_vdd_off(intel_dp, true);
2491
2492 pps_unlock(intel_dp);
2493
61234fa5
VS
2494 if (IS_VALLEYVIEW(dev))
2495 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496
f01eca2e 2497 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2498 intel_dp_start_link_train(intel_dp);
33a34e4e 2499 intel_dp_complete_link_train(intel_dp);
3ab9c637 2500 intel_dp_stop_link_train(intel_dp);
c1dec79a 2501
6e3c9717 2502 if (crtc->config->has_audio) {
c1dec79a
JN
2503 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2504 pipe_name(crtc->pipe));
2505 intel_audio_codec_enable(encoder);
2506 }
ab1f90f9 2507}
89b667f8 2508
ecff4f3b
JN
2509static void g4x_enable_dp(struct intel_encoder *encoder)
2510{
828f5c6e
JN
2511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
ecff4f3b 2513 intel_enable_dp(encoder);
4be73780 2514 intel_edp_backlight_on(intel_dp);
ab1f90f9 2515}
89b667f8 2516
ab1f90f9
JN
2517static void vlv_enable_dp(struct intel_encoder *encoder)
2518{
828f5c6e
JN
2519 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
4be73780 2521 intel_edp_backlight_on(intel_dp);
b32c6f48 2522 intel_psr_enable(intel_dp);
d240f20f
JB
2523}
2524
ecff4f3b 2525static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2526{
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2528 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529
8ac33ed3
DV
2530 intel_dp_prepare(encoder);
2531
d41f1efb
DV
2532 /* Only ilk+ has port A */
2533 if (dport->port == PORT_A) {
2534 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2535 ironlake_edp_pll_on(intel_dp);
d41f1efb 2536 }
ab1f90f9
JN
2537}
2538
83b84597
VS
2539static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540{
2541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2542 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2543 enum pipe pipe = intel_dp->pps_pipe;
2544 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545
2546 edp_panel_vdd_off_sync(intel_dp);
2547
2548 /*
 2549	 * VLV seems to get confused when multiple power sequencers
 2550	 * have the same port selected (even if only one has power/vdd
 2551	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2552	 * CHV, on the other hand, doesn't seem to mind having the same port
 2553	 * selected in multiple power sequencers, but let's clear the
2554 * port select always when logically disconnecting a power sequencer
2555 * from a port.
2556 */
2557 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2558 pipe_name(pipe), port_name(intel_dig_port->port));
2559 I915_WRITE(pp_on_reg, 0);
2560 POSTING_READ(pp_on_reg);
2561
2562 intel_dp->pps_pipe = INVALID_PIPE;
2563}
2564
a4a5d2f8
VS
2565static void vlv_steal_power_sequencer(struct drm_device *dev,
2566 enum pipe pipe)
2567{
2568 struct drm_i915_private *dev_priv = dev->dev_private;
2569 struct intel_encoder *encoder;
2570
2571 lockdep_assert_held(&dev_priv->pps_mutex);
2572
ac3c12e4
VS
2573 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2574 return;
2575
a4a5d2f8
VS
2576 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 base.head) {
2578 struct intel_dp *intel_dp;
773538e8 2579 enum port port;
a4a5d2f8
VS
2580
2581 if (encoder->type != INTEL_OUTPUT_EDP)
2582 continue;
2583
2584 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2585 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2586
2587 if (intel_dp->pps_pipe != pipe)
2588 continue;
2589
2590 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2591 pipe_name(pipe), port_name(port));
a4a5d2f8 2592
034e43c6
VS
2593 WARN(encoder->connectors_active,
2594 "stealing pipe %c power sequencer from active eDP port %c\n",
2595 pipe_name(pipe), port_name(port));
a4a5d2f8 2596
a4a5d2f8 2597 /* make sure vdd is off before we steal it */
83b84597 2598 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2599 }
2600}
2601
2602static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603{
2604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605 struct intel_encoder *encoder = &intel_dig_port->base;
2606 struct drm_device *dev = encoder->base.dev;
2607 struct drm_i915_private *dev_priv = dev->dev_private;
2608 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2609
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2611
093e3f13
VS
2612 if (!is_edp(intel_dp))
2613 return;
2614
a4a5d2f8
VS
2615 if (intel_dp->pps_pipe == crtc->pipe)
2616 return;
2617
2618 /*
2619 * If another power sequencer was being used on this
2620 * port previously make sure to turn off vdd there while
2621 * we still have control of it.
2622 */
2623 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2624 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2625
2626 /*
2627 * We may be stealing the power
2628 * sequencer from another port.
2629 */
2630 vlv_steal_power_sequencer(dev, crtc->pipe);
2631
2632 /* now it's all ours */
2633 intel_dp->pps_pipe = crtc->pipe;
2634
2635 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2636 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637
2638 /* init power sequencer on this pipe and port */
36b5f425
VS
2639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2641}
2642
ab1f90f9 2643static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2644{
2bd2ad64 2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2646 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2647 struct drm_device *dev = encoder->base.dev;
89b667f8 2648 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2649 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2650 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2651 int pipe = intel_crtc->pipe;
2652 u32 val;
a4fc5ed6 2653
ab1f90f9 2654 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2655
ab3c759a 2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2657 val = 0;
2658 if (pipe)
2659 val |= (1<<21);
2660 else
2661 val &= ~(1<<21);
2662 val |= 0x001000c4;
ab3c759a
CML
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2666
ab1f90f9
JN
2667 mutex_unlock(&dev_priv->dpio_lock);
2668
2669 intel_enable_dp(encoder);
89b667f8
JB
2670}
2671
ecff4f3b 2672static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2673{
2674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2675 struct drm_device *dev = encoder->base.dev;
2676 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2677 struct intel_crtc *intel_crtc =
2678 to_intel_crtc(encoder->base.crtc);
e4607fcf 2679 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2680 int pipe = intel_crtc->pipe;
89b667f8 2681
8ac33ed3
DV
2682 intel_dp_prepare(encoder);
2683
89b667f8 2684 /* Program Tx lane resets to default */
0980a60f 2685 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2687 DPIO_PCS_TX_LANE2_RESET |
2688 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2690 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2691 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2692 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2693 DPIO_PCS_CLK_SOFT_RESET);
2694
2695 /* Fix up inter-pair skew failure */
ab3c759a
CML
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2699 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2700}
2701
e4a1d846
CML
2702static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703{
2704 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2705 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 int pipe = intel_crtc->pipe;
2712 int data, i;
949c1d43 2713 u32 val;
e4a1d846 2714
e4a1d846 2715 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2716
570e2a74
VS
2717 /* allow hardware to manage TX FIFO reset source */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2719 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2720 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721
2722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2723 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725
949c1d43 2726	/* Deassert soft data lane reset */
97fd4d5c 2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2728 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2729 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730
2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2732 val |= CHV_PCS_REQ_SOFTRESET_EN;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2736 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2738
97fd4d5c 2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2740 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2742
 2743	/* Program Tx lane latency optimal setting */
e4a1d846 2744 for (i = 0; i < 4; i++) {
e4a1d846
CML
2745 /* Set the upar bit */
2746 data = (i == 1) ? 0x0 : 0x1;
2747 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2748 data << DPIO_UPAR_SHIFT);
2749 }
2750
2751 /* Data lane stagger programming */
2752 /* FIXME: Fix up value only after power analysis */
2753
2754 mutex_unlock(&dev_priv->dpio_lock);
2755
e4a1d846 2756 intel_enable_dp(encoder);
e4a1d846
CML
2757}
2758
9197c88b
VS
2759static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2760{
2761 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2762 struct drm_device *dev = encoder->base.dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_crtc *intel_crtc =
2765 to_intel_crtc(encoder->base.crtc);
2766 enum dpio_channel ch = vlv_dport_to_channel(dport);
2767 enum pipe pipe = intel_crtc->pipe;
2768 u32 val;
2769
625695f8
VS
2770 intel_dp_prepare(encoder);
2771
9197c88b
VS
2772 mutex_lock(&dev_priv->dpio_lock);
2773
b9e5ac3c
VS
2774 /* program left/right clock distribution */
2775 if (pipe != PIPE_B) {
2776 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2777 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2778 if (ch == DPIO_CH0)
2779 val |= CHV_BUFLEFTENA1_FORCE;
2780 if (ch == DPIO_CH1)
2781 val |= CHV_BUFRIGHTENA1_FORCE;
2782 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2783 } else {
2784 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2785 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2786 if (ch == DPIO_CH0)
2787 val |= CHV_BUFLEFTENA2_FORCE;
2788 if (ch == DPIO_CH1)
2789 val |= CHV_BUFRIGHTENA2_FORCE;
2790 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2791 }
2792
9197c88b
VS
2793 /* program clock channel usage */
2794 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2795 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2796 if (pipe != PIPE_B)
2797 val &= ~CHV_PCS_USEDCLKCHANNEL;
2798 else
2799 val |= CHV_PCS_USEDCLKCHANNEL;
2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2801
2802 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2803 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2804 if (pipe != PIPE_B)
2805 val &= ~CHV_PCS_USEDCLKCHANNEL;
2806 else
2807 val |= CHV_PCS_USEDCLKCHANNEL;
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2809
2810 /*
 2811	 * This is a bit weird since generally CL
2812 * matches the pipe, but here we need to
2813 * pick the CL based on the port.
2814 */
2815 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2816 if (pipe != PIPE_B)
2817 val &= ~CHV_CMN_USEDCLKCHANNEL;
2818 else
2819 val |= CHV_CMN_USEDCLKCHANNEL;
2820 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2821
2822 mutex_unlock(&dev_priv->dpio_lock);
2823}
2824
a4fc5ed6 2825/*
df0c237d
JB
2826 * Native read with retry for link status and receiver capability reads for
2827 * cases where the sink may still be asleep.
9d1a1031
JN
2828 *
2829 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2830 * supposed to retry 3 times per the spec.
a4fc5ed6 2831 */
9d1a1031
JN
2832static ssize_t
2833intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2834 void *buffer, size_t size)
a4fc5ed6 2835{
9d1a1031
JN
2836 ssize_t ret;
2837 int i;
61da5fab 2838
f6a19066
VS
2839 /*
 2840	 * Sometimes we just get the same incorrect byte repeated
 2841	 * over the entire buffer. Doing just one throw-away read
2842 * initially seems to "solve" it.
2843 */
2844 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2845
61da5fab 2846 for (i = 0; i < 3; i++) {
9d1a1031
JN
2847 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2848 if (ret == size)
2849 return ret;
61da5fab
JB
2850 msleep(1);
2851 }
a4fc5ed6 2852
9d1a1031 2853 return ret;
a4fc5ed6
KP
2854}
2855
2856/*
2857 * Fetch AUX CH registers 0x202 - 0x207 which contain
2858 * link status information
2859 */
2860static bool
93f62dad 2861intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2862{
9d1a1031
JN
2863 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2864 DP_LANE0_1_STATUS,
2865 link_status,
2866 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2867}
2868
1100244e 2869/* These are source-specific values. */
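/*
 * intel_dp_voltage_max()/intel_dp_pre_emphasis_max() report the highest
 * voltage swing and pre-emphasis level the source can drive on this
 * platform/port; sink requests are clamped against them during training.
 */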
a4fc5ed6 2870static uint8_t
1a2eb460 2871intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2872{
30add22d 2873 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2874 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2875 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2876
7ad14a29
SJ
2877 if (INTEL_INFO(dev)->gen >= 9) {
2878 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2879 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2880 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2881 } else if (IS_VALLEYVIEW(dev))
bd60018a 2882 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2883 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2885 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2886 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2887 else
bd60018a 2888 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2889}
2890
2891static uint8_t
2892intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2893{
30add22d 2894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2895 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2896
5a9d1f1a
DL
2897 if (INTEL_INFO(dev)->gen >= 9) {
2898 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2904 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2905 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2906 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2907 default:
2908 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2909 }
2910 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2911 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2912 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2914 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2915 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2916 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2917 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2919 default:
bd60018a 2920 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2921 }
e2fa6fba
P
2922 } else if (IS_VALLEYVIEW(dev)) {
2923 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2924 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2925 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2927 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2929 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2930 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2931 default:
bd60018a 2932 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2933 }
bc7d38a4 2934 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2935 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2936 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2937 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2938 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2940 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2941 default:
bd60018a 2942 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2943 }
2944 } else {
2945 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2946 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2947 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2949 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2951 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2953 default:
bd60018a 2954 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2955 }
a4fc5ed6
KP
2956 }
2957}
2958
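/*
 * Convert the vswing/pre-emphasis level in train_set[0] into VLV DPIO PHY
 * register values (demph, pre-emphasis, unique transition scale) and write
 * them out over the sideband for this port's TX lanes.
 */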
e2fa6fba
P
2959static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2960{
2961 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2962 struct drm_i915_private *dev_priv = dev->dev_private;
2963 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2964 struct intel_crtc *intel_crtc =
2965 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2966 unsigned long demph_reg_value, preemph_reg_value,
2967 uniqtranscale_reg_value;
2968 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2969 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2970 int pipe = intel_crtc->pipe;
e2fa6fba
P
2971
2972 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2973 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2974 preemph_reg_value = 0x0004000;
2975 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2977 demph_reg_value = 0x2B405555;
2978 uniqtranscale_reg_value = 0x552AB83A;
2979 break;
bd60018a 2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2981 demph_reg_value = 0x2B404040;
2982 uniqtranscale_reg_value = 0x5548B83A;
2983 break;
bd60018a 2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2985 demph_reg_value = 0x2B245555;
2986 uniqtranscale_reg_value = 0x5560B83A;
2987 break;
bd60018a 2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2989 demph_reg_value = 0x2B405555;
2990 uniqtranscale_reg_value = 0x5598DA3A;
2991 break;
2992 default:
2993 return 0;
2994 }
2995 break;
bd60018a 2996 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2997 preemph_reg_value = 0x0002000;
2998 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3000 demph_reg_value = 0x2B404040;
3001 uniqtranscale_reg_value = 0x5552B83A;
3002 break;
bd60018a 3003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3004 demph_reg_value = 0x2B404848;
3005 uniqtranscale_reg_value = 0x5580B83A;
3006 break;
bd60018a 3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3008 demph_reg_value = 0x2B404040;
3009 uniqtranscale_reg_value = 0x55ADDA3A;
3010 break;
3011 default:
3012 return 0;
3013 }
3014 break;
bd60018a 3015 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3016 preemph_reg_value = 0x0000000;
3017 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3019 demph_reg_value = 0x2B305555;
3020 uniqtranscale_reg_value = 0x5570B83A;
3021 break;
bd60018a 3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3023 demph_reg_value = 0x2B2B4040;
3024 uniqtranscale_reg_value = 0x55ADDA3A;
3025 break;
3026 default:
3027 return 0;
3028 }
3029 break;
bd60018a 3030 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3031 preemph_reg_value = 0x0006000;
3032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3034 demph_reg_value = 0x1B405555;
3035 uniqtranscale_reg_value = 0x55ADDA3A;
3036 break;
3037 default:
3038 return 0;
3039 }
3040 break;
3041 default:
3042 return 0;
3043 }
3044
0980a60f 3045 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3046 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3047 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3048 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3049 uniqtranscale_reg_value);
ab3c759a
CML
3050 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3052 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3054 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3055
3056 return 0;
3057}
3058
e4a1d846
CML
3059static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3060{
3061 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3062 struct drm_i915_private *dev_priv = dev->dev_private;
3063 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3064 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3065 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3066 uint8_t train_set = intel_dp->train_set[0];
3067 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3068 enum pipe pipe = intel_crtc->pipe;
3069 int i;
e4a1d846
CML
3070
3071 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3072 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3075 deemph_reg_value = 128;
3076 margin_reg_value = 52;
3077 break;
bd60018a 3078 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3079 deemph_reg_value = 128;
3080 margin_reg_value = 77;
3081 break;
bd60018a 3082 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3083 deemph_reg_value = 128;
3084 margin_reg_value = 102;
3085 break;
bd60018a 3086 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3087 deemph_reg_value = 128;
3088 margin_reg_value = 154;
3089 /* FIXME extra to set for 1200 */
3090 break;
3091 default:
3092 return 0;
3093 }
3094 break;
bd60018a 3095 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3096 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3098 deemph_reg_value = 85;
3099 margin_reg_value = 78;
3100 break;
bd60018a 3101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3102 deemph_reg_value = 85;
3103 margin_reg_value = 116;
3104 break;
bd60018a 3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3106 deemph_reg_value = 85;
3107 margin_reg_value = 154;
3108 break;
3109 default:
3110 return 0;
3111 }
3112 break;
bd60018a 3113 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3114 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3116 deemph_reg_value = 64;
3117 margin_reg_value = 104;
3118 break;
bd60018a 3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3120 deemph_reg_value = 64;
3121 margin_reg_value = 154;
3122 break;
3123 default:
3124 return 0;
3125 }
3126 break;
bd60018a 3127 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3130 deemph_reg_value = 43;
3131 margin_reg_value = 154;
3132 break;
3133 default:
3134 return 0;
3135 }
3136 break;
3137 default:
3138 return 0;
3139 }
3140
3141 mutex_lock(&dev_priv->dpio_lock);
3142
3143 /* Clear calc init */
1966e59e
VS
3144 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3145 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3146 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3147 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3148 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3149
3150 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3151 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3152 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3153 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3154 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3155
a02ef3c7
VS
3156 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3157 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3158 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3160
3161 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3162 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3165
e4a1d846 3166 /* Program swing deemph */
f72df8db
VS
3167 for (i = 0; i < 4; i++) {
3168 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3169 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3170 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3171 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3172 }
e4a1d846
CML
3173
3174 /* Program swing margin */
f72df8db
VS
3175 for (i = 0; i < 4; i++) {
3176 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3177 val &= ~DPIO_SWING_MARGIN000_MASK;
3178 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3179 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3180 }
e4a1d846
CML
3181
3182 /* Disable unique transition scale */
f72df8db
VS
3183 for (i = 0; i < 4; i++) {
3184 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3185 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3186 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3187 }
e4a1d846
CML
3188
3189 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3190 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3191 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3192 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3193
3194 /*
 3195	 * The document says to set bit 27 for ch0 and bit 26
 3196	 * for ch1. Might be a typo in the doc.
3197 * For now, for this unique transition scale selection, set bit
3198 * 27 for ch0 and ch1.
3199 */
f72df8db
VS
3200 for (i = 0; i < 4; i++) {
3201 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3202 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3203 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3204 }
e4a1d846 3205
f72df8db
VS
3206 for (i = 0; i < 4; i++) {
3207 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3208 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3210 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3211 }
e4a1d846
CML
3212 }
3213
3214 /* Start swing calculation */
1966e59e
VS
3215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3216 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3220 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3221 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3222
3223 /* LRC Bypass */
3224 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3225 val |= DPIO_LRC_BYPASS;
3226 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3227
3228 mutex_unlock(&dev_priv->dpio_lock);
3229
3230 return 0;
3231}
3232
a4fc5ed6 3233static void
0301b3ac
JN
3234intel_get_adjust_train(struct intel_dp *intel_dp,
3235 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3236{
3237 uint8_t v = 0;
3238 uint8_t p = 0;
3239 int lane;
1a2eb460
KP
3240 uint8_t voltage_max;
3241 uint8_t preemph_max;
a4fc5ed6 3242
33a34e4e 3243 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3244 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3245 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3246
3247 if (this_v > v)
3248 v = this_v;
3249 if (this_p > p)
3250 p = this_p;
3251 }
3252
1a2eb460 3253 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3254 if (v >= voltage_max)
3255 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3256
1a2eb460
KP
3257 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3258 if (p >= preemph_max)
3259 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3260
3261 for (lane = 0; lane < 4; lane++)
33a34e4e 3262 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3263}
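A self-contained sketch of the train_set byte the helper above produces, with the DP-spec bit layout mirrored in local constants so it compiles on its own (the sketch_* names are assumptions for illustration; the real definitions live in drm_dp_helper.h):

#include <stdint.h>

#define SKETCH_VOLTAGE_MASK           0x03      /* bits 1:0 - swing level */
#define SKETCH_MAX_SWING_REACHED      (1 << 2)  /* tell the sink the cap was hit */
#define SKETCH_PRE_EMPHASIS_SHIFT     3         /* bits 4:3 - pre-emphasis level */
#define SKETCH_MAX_PRE_EMPH_REACHED   (1 << 5)

/* Fold one requested (voltage, pre-emphasis) pair into a train_set byte,
 * clamping to the source's maxima the same way the driver does above.
 * voltage_max/preemph_max are expected pre-shifted, as in the driver. */
static uint8_t sketch_build_train_set(uint8_t req_voltage, uint8_t req_preemph,
                                      uint8_t voltage_max, uint8_t preemph_max)
{
        uint8_t v = req_voltage & SKETCH_VOLTAGE_MASK;
        uint8_t p = (uint8_t)(req_preemph << SKETCH_PRE_EMPHASIS_SHIFT);

        if (v >= voltage_max)
                v = voltage_max | SKETCH_MAX_SWING_REACHED;
        if (p >= preemph_max)
                p = preemph_max | SKETCH_MAX_PRE_EMPH_REACHED;

        return v | p;
}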
3264
3265static uint32_t
f0a3424e 3266intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3267{
3cf2efb1 3268 uint32_t signal_levels = 0;
a4fc5ed6 3269
3cf2efb1 3270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3272 default:
3273 signal_levels |= DP_VOLTAGE_0_4;
3274 break;
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3276 signal_levels |= DP_VOLTAGE_0_6;
3277 break;
bd60018a 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3279 signal_levels |= DP_VOLTAGE_0_8;
3280 break;
bd60018a 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3282 signal_levels |= DP_VOLTAGE_1_2;
3283 break;
3284 }
3cf2efb1 3285 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3286 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3287 default:
3288 signal_levels |= DP_PRE_EMPHASIS_0;
3289 break;
bd60018a 3290 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3291 signal_levels |= DP_PRE_EMPHASIS_3_5;
3292 break;
bd60018a 3293 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3294 signal_levels |= DP_PRE_EMPHASIS_6;
3295 break;
bd60018a 3296 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3297 signal_levels |= DP_PRE_EMPHASIS_9_5;
3298 break;
3299 }
3300 return signal_levels;
3301}
3302
e3421a18
ZW
3303/* Gen6's DP voltage swing and pre-emphasis control */
3304static uint32_t
3305intel_gen6_edp_signal_levels(uint8_t train_set)
3306{
3c5a62b5
YL
3307 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3308 DP_TRAIN_PRE_EMPHASIS_MASK);
3309 switch (signal_levels) {
bd60018a
SJ
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3312 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3314 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3317 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3320 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3323 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3324 default:
3c5a62b5
YL
3325 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3326 "0x%x\n", signal_levels);
3327 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3328 }
3329}
3330
1a2eb460
KP
3331/* Gen7's DP voltage swing and pre-emphasis control */
3332static uint32_t
3333intel_gen7_edp_signal_levels(uint8_t train_set)
3334{
3335 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3336 DP_TRAIN_PRE_EMPHASIS_MASK);
3337 switch (signal_levels) {
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3339 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3341 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3343 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3344
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3346 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3348 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3349
bd60018a 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3351 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3353 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3354
3355 default:
3356 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3357 "0x%x\n", signal_levels);
3358 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3359 }
3360}
3361
d6c0d722
PZ
3362/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3363static uint32_t
f0a3424e 3364intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3365{
d6c0d722
PZ
3366 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3367 DP_TRAIN_PRE_EMPHASIS_MASK);
3368 switch (signal_levels) {
bd60018a 3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3370 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3372 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3374 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3375 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3376 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3377
bd60018a 3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3379 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3381 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3383 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3384
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3386 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3388 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3389
3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3391 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3392 default:
3393 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3394 "0x%x\n", signal_levels);
c5fe6a06 3395 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3396 }
a4fc5ed6
KP
3397}
3398
f0a3424e
PZ
3399/* Properly updates "DP" with the correct signal levels. */
3400static void
3401intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3402{
3403 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3404 enum port port = intel_dig_port->port;
f0a3424e
PZ
3405 struct drm_device *dev = intel_dig_port->base.base.dev;
3406 uint32_t signal_levels, mask;
3407 uint8_t train_set = intel_dp->train_set[0];
3408
5a9d1f1a 3409 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3410 signal_levels = intel_hsw_signal_levels(train_set);
3411 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3412 } else if (IS_CHERRYVIEW(dev)) {
3413 signal_levels = intel_chv_signal_levels(intel_dp);
3414 mask = 0;
e2fa6fba
P
3415 } else if (IS_VALLEYVIEW(dev)) {
3416 signal_levels = intel_vlv_signal_levels(intel_dp);
3417 mask = 0;
bc7d38a4 3418 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3419 signal_levels = intel_gen7_edp_signal_levels(train_set);
3420 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3421 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3422 signal_levels = intel_gen6_edp_signal_levels(train_set);
3423 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3424 } else {
3425 signal_levels = intel_gen4_signal_levels(train_set);
3426 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3427 }
3428
3429 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3430
3431 *DP = (*DP & ~mask) | signal_levels;
3432}
3433
a4fc5ed6 3434static bool
ea5b213a 3435intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3436 uint32_t *DP,
58e10eb9 3437 uint8_t dp_train_pat)
a4fc5ed6 3438{
174edf1f
PZ
3439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3440 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3441 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3442 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3443 int ret, len;
a4fc5ed6 3444
7b13b58a 3445 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3446
70aff66c 3447 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3448 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3449
2cdfe6c8
JN
3450 buf[0] = dp_train_pat;
3451 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3452 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3453 /* don't write DP_TRAINING_LANEx_SET on disable */
3454 len = 1;
3455 } else {
3456 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3457 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3458 len = intel_dp->lane_count + 1;
47ea7542 3459 }
a4fc5ed6 3460
9d1a1031
JN
3461 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3462 buf, len);
2cdfe6c8
JN
3463
3464 return ret == len;
a4fc5ed6
KP
3465}
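The single AUX burst above works because DP_TRAINING_LANE0_SET (0x103) immediately follows DP_TRAINING_PATTERN_SET (0x102) in the DPCD. A minimal sketch of how such a buffer is packed (the helper name and the 0x3 pattern mask used here are assumptions for illustration):

#include <stdint.h>
#include <string.h>

/* Pack one write covering DPCD 0x102 (pattern) plus 0x103.. (per-lane levels).
 * When the pattern field requests "disable", only the pattern byte is sent. */
static size_t sketch_pack_training_burst(uint8_t dp_train_pat,
                                         const uint8_t *train_set,
                                         size_t lane_count, uint8_t *buf)
{
        buf[0] = dp_train_pat;
        if ((dp_train_pat & 0x3) == 0)          /* training pattern disable */
                return 1;

        memcpy(buf + 1, train_set, lane_count); /* lane 0..lane_count-1 */
        return lane_count + 1;
}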
3466
70aff66c
JN
3467static bool
3468intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3469 uint8_t dp_train_pat)
3470{
953d22e8 3471 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3472 intel_dp_set_signal_levels(intel_dp, DP);
3473 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3474}
3475
3476static bool
3477intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3478 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3479{
3480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3481 struct drm_device *dev = intel_dig_port->base.base.dev;
3482 struct drm_i915_private *dev_priv = dev->dev_private;
3483 int ret;
3484
3485 intel_get_adjust_train(intel_dp, link_status);
3486 intel_dp_set_signal_levels(intel_dp, DP);
3487
3488 I915_WRITE(intel_dp->output_reg, *DP);
3489 POSTING_READ(intel_dp->output_reg);
3490
9d1a1031
JN
3491 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3492 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3493
3494 return ret == intel_dp->lane_count;
3495}
3496
3ab9c637
ID
3497static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3498{
3499 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3500 struct drm_device *dev = intel_dig_port->base.base.dev;
3501 struct drm_i915_private *dev_priv = dev->dev_private;
3502 enum port port = intel_dig_port->port;
3503 uint32_t val;
3504
3505 if (!HAS_DDI(dev))
3506 return;
3507
3508 val = I915_READ(DP_TP_CTL(port));
3509 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3510 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3511 I915_WRITE(DP_TP_CTL(port), val);
3512
3513 /*
3514 * On PORT_A we can have only eDP in SST mode. There the only reason
3515 * we need to set idle transmission mode is to work around a HW issue
3516 * where we enable the pipe while not in idle link-training mode.
3517 * In this case there is a requirement to wait for a minimum number of
3518 * idle patterns to be sent.
3519 */
3520 if (port == PORT_A)
3521 return;
3522
3523 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3524 1))
3525 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3526}
3527
33a34e4e 3528/* Enable corresponding port and start training pattern 1 */
c19b0669 3529void
33a34e4e 3530intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3531{
da63a9f2 3532 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3533 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3534 int i;
3535 uint8_t voltage;
cdb0e95b 3536 int voltage_tries, loop_tries;
ea5b213a 3537 uint32_t DP = intel_dp->DP;
6aba5b6c 3538 uint8_t link_config[2];
a4fc5ed6 3539
affa9354 3540 if (HAS_DDI(dev))
c19b0669
PZ
3541 intel_ddi_prepare_link_retrain(encoder);
3542
3cf2efb1 3543 /* Write the link configuration data */
6aba5b6c
JN
3544 link_config[0] = intel_dp->link_bw;
3545 link_config[1] = intel_dp->lane_count;
3546 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3547 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3548 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3549 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3550 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3551 &intel_dp->rate_select, 1);
6aba5b6c
JN
3552
3553 link_config[0] = 0;
3554 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3555 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3556
3557 DP |= DP_PORT_EN;
1a2eb460 3558
70aff66c
JN
3559 /* clock recovery */
3560 if (!intel_dp_reset_link_train(intel_dp, &DP,
3561 DP_TRAINING_PATTERN_1 |
3562 DP_LINK_SCRAMBLING_DISABLE)) {
3563 DRM_ERROR("failed to enable link training\n");
3564 return;
3565 }
3566
a4fc5ed6 3567 voltage = 0xff;
cdb0e95b
KP
3568 voltage_tries = 0;
3569 loop_tries = 0;
a4fc5ed6 3570 for (;;) {
70aff66c 3571 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3572
a7c9655f 3573 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3574 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3575 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3576 break;
93f62dad 3577 }
a4fc5ed6 3578
01916270 3579 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3580 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3581 break;
3582 }
3583
3584 /* Check to see if we've tried the max voltage */
3585 for (i = 0; i < intel_dp->lane_count; i++)
3586 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3587 break;
3b4f819d 3588 if (i == intel_dp->lane_count) {
b06fbda3
DV
3589 ++loop_tries;
3590 if (loop_tries == 5) {
3def84b3 3591 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3592 break;
3593 }
70aff66c
JN
3594 intel_dp_reset_link_train(intel_dp, &DP,
3595 DP_TRAINING_PATTERN_1 |
3596 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3597 voltage_tries = 0;
3598 continue;
3599 }
a4fc5ed6 3600
3cf2efb1 3601 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3602 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3603 ++voltage_tries;
b06fbda3 3604 if (voltage_tries == 5) {
3def84b3 3605 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3606 break;
3607 }
3608 } else
3609 voltage_tries = 0;
3610 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3611
70aff66c
JN
3612 /* Update training set as requested by target */
3613 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3614 DRM_ERROR("failed to update link training\n");
3615 break;
3616 }
a4fc5ed6
KP
3617 }
3618
33a34e4e
JB
3619 intel_dp->DP = DP;
3620}
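The clock-recovery loop above is, at its core, a retry policy: give up after five tries at one swing level, or after five full restarts once every lane reports max swing. A self-contained sketch of just that policy, with stub callbacks standing in for the DPCD reads and register writes (all sketch_* names are assumptions, not driver API):

#include <stdbool.h>
#include <stdint.h>

struct sketch_link {
        int lane_count;
        uint8_t train_set[4];   /* lane 0 drives the same-voltage check */
};

static bool sketch_clock_recovery(struct sketch_link *link,
                                  bool (*cr_ok)(struct sketch_link *),
                                  bool (*all_lanes_max_swing)(struct sketch_link *),
                                  void (*restart_pattern_1)(struct sketch_link *),
                                  void (*apply_sink_adjust)(struct sketch_link *))
{
        uint8_t voltage = 0xff;
        int voltage_tries = 0, loop_tries = 0;

        for (;;) {
                if (cr_ok(link))
                        return true;                    /* clock recovery locked */

                if (all_lanes_max_swing(link)) {
                        if (++loop_tries == 5)
                                return false;           /* too many full restarts */
                        restart_pattern_1(link);
                        voltage_tries = 0;
                        continue;
                }

                if ((link->train_set[0] & 0x3) == voltage) {
                        if (++voltage_tries == 5)
                                return false;           /* stuck at one swing level */
                } else {
                        voltage_tries = 0;
                }
                voltage = link->train_set[0] & 0x3;

                apply_sink_adjust(link);                /* take the sink's new request */
        }
}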
3621
c19b0669 3622void
33a34e4e
JB
3623intel_dp_complete_link_train(struct intel_dp *intel_dp)
3624{
33a34e4e 3625 bool channel_eq = false;
37f80975 3626 int tries, cr_tries;
33a34e4e 3627 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3628 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3629
3630 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3631 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3632 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3633
a4fc5ed6 3634 /* channel equalization */
70aff66c 3635 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3636 training_pattern |
70aff66c
JN
3637 DP_LINK_SCRAMBLING_DISABLE)) {
3638 DRM_ERROR("failed to start channel equalization\n");
3639 return;
3640 }
3641
a4fc5ed6 3642 tries = 0;
37f80975 3643 cr_tries = 0;
a4fc5ed6
KP
3644 channel_eq = false;
3645 for (;;) {
70aff66c 3646 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3647
37f80975
JB
3648 if (cr_tries > 5) {
3649 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3650 break;
3651 }
3652
a7c9655f 3653 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3654 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3655 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3656 break;
70aff66c 3657 }
a4fc5ed6 3658
37f80975 3659 /* Make sure clock is still ok */
01916270 3660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3661 intel_dp_start_link_train(intel_dp);
70aff66c 3662 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3663 training_pattern |
70aff66c 3664 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3665 cr_tries++;
3666 continue;
3667 }
3668
1ffdff13 3669 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3670 channel_eq = true;
3671 break;
3672 }
a4fc5ed6 3673
37f80975
JB
3674 /* Try 5 times, then try clock recovery if that fails */
3675 if (tries > 5) {
37f80975 3676 intel_dp_start_link_train(intel_dp);
70aff66c 3677 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3678 training_pattern |
70aff66c 3679 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3680 tries = 0;
3681 cr_tries++;
3682 continue;
3683 }
a4fc5ed6 3684
70aff66c
JN
3685 /* Update training set as requested by target */
3686 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3687 DRM_ERROR("failed to update link training\n");
3688 break;
3689 }
3cf2efb1 3690 ++tries;
869184a6 3691 }
3cf2efb1 3692
3ab9c637
ID
3693 intel_dp_set_idle_link_train(intel_dp);
3694
3695 intel_dp->DP = DP;
3696
d6c0d722 3697 if (channel_eq)
07f42258 3698 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3699
3ab9c637
ID
3700}
3701
3702void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3703{
70aff66c 3704 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3705 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3706}
3707
3708static void
ea5b213a 3709intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3710{
da63a9f2 3711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3712 enum port port = intel_dig_port->port;
da63a9f2 3713 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3714 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3715 uint32_t DP = intel_dp->DP;
a4fc5ed6 3716
bc76e320 3717 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3718 return;
3719
0c33d8d7 3720 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3721 return;
3722
28c97730 3723 DRM_DEBUG_KMS("\n");
32f9d658 3724
bc7d38a4 3725 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3726 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3727 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3728 } else {
aad3d14d
VS
3729 if (IS_CHERRYVIEW(dev))
3730 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3731 else
3732 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3733 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3734 }
fe255d00 3735 POSTING_READ(intel_dp->output_reg);
5eb08b69 3736
493a7081 3737 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3738 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3739 /* Hardware workaround: leaving our transcoder select
3740 * set to transcoder B while it's off will prevent the
3741 * corresponding HDMI output on transcoder A.
3742 *
3743 * Combine this with another hardware workaround:
3744 * transcoder select bit can only be cleared while the
3745 * port is enabled.
3746 */
3747 DP &= ~DP_PIPEB_SELECT;
3748 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3749 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3750 }
3751
832afda6 3752 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3753 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3754 POSTING_READ(intel_dp->output_reg);
f01eca2e 3755 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3756}
3757
26d61aad
KP
3758static bool
3759intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3760{
a031d709
RV
3761 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3762 struct drm_device *dev = dig_port->base.base.dev;
3763 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3764 uint8_t rev;
a031d709 3765
9d1a1031
JN
3766 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3767 sizeof(intel_dp->dpcd)) < 0)
edb39244 3768 return false; /* aux transfer failed */
92fd8fd1 3769
a8e98153 3770 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3771
edb39244
AJ
3772 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3773 return false; /* DPCD not present */
3774
2293bb5c
SK
3775 /* Check if the panel supports PSR */
3776 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3777 if (is_edp(intel_dp)) {
9d1a1031
JN
3778 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3779 intel_dp->psr_dpcd,
3780 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3781 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3782 dev_priv->psr.sink_support = true;
50003939 3783 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3784 }
50003939
JN
3785 }
3786
7809a611 3787 /* Training Pattern 3 support, both source and sink */
06ea66b6 3788 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3789 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3790 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3791 intel_dp->use_tps3 = true;
f8d8a672 3792 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3793 } else
3794 intel_dp->use_tps3 = false;
3795
fc0f8e25
SJ
3796 /* Intermediate frequency support */
3797 if (is_edp(intel_dp) &&
3798 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3799 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3800 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3801 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3802 int i;
3803
fc0f8e25
SJ
3804 intel_dp_dpcd_read_wake(&intel_dp->aux,
3805 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3806 sink_rates,
3807 sizeof(sink_rates));
ea2d8a42 3808
94ca719e
VS
3809 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3810 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3811
3812 if (val == 0)
3813 break;
3814
94ca719e 3815 intel_dp->sink_rates[i] = val * 200;
ea2d8a42 3816 }
94ca719e 3817 intel_dp->num_sink_rates = i;
fc0f8e25 3818 }
0336400e
VS
3819
3820 intel_dp_print_rates(intel_dp);
3821
edb39244
AJ
3822 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3823 DP_DWN_STRM_PORT_PRESENT))
3824 return true; /* native DP sink */
3825
3826 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3827 return true; /* no per-port downstream info */
3828
9d1a1031
JN
3829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3830 intel_dp->downstream_ports,
3831 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3832 return false; /* downstream port status fetch failed */
3833
3834 return true;
92fd8fd1
KP
3835}
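A standalone sketch of the eDP 1.4 supported-link-rates parse done above: up to eight little-endian 16-bit entries, the list ending at the first zero entry, each scaled by 200 into the same units the driver's fixed-rate tables use (the helper name and raw-buffer layout are assumptions for illustration):

#include <stdint.h>

#define SKETCH_MAX_SUPPORTED_RATES 8    /* mirrors DP_MAX_SUPPORTED_RATES */

static int sketch_parse_sink_rates(const uint8_t raw[2 * SKETCH_MAX_SUPPORTED_RATES],
                                   int rates[SKETCH_MAX_SUPPORTED_RATES])
{
        int i, n = 0;

        for (i = 0; i < SKETCH_MAX_SUPPORTED_RATES; i++) {
                /* open-coded le16_to_cpu() on a raw byte buffer */
                uint16_t val = (uint16_t)(raw[2 * i] | (raw[2 * i + 1] << 8));

                if (val == 0)           /* unused entries are zero: end of table */
                        break;

                rates[n++] = val * 200; /* same *200 scaling as the code above */
        }

        return n;                       /* becomes num_sink_rates */
}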
3836
0d198328
AJ
3837static void
3838intel_dp_probe_oui(struct intel_dp *intel_dp)
3839{
3840 u8 buf[3];
3841
3842 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3843 return;
3844
9d1a1031 3845 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3846 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3847 buf[0], buf[1], buf[2]);
3848
9d1a1031 3849 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3850 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3851 buf[0], buf[1], buf[2]);
3852}
3853
0e32b39c
DA
3854static bool
3855intel_dp_probe_mst(struct intel_dp *intel_dp)
3856{
3857 u8 buf[1];
3858
3859 if (!intel_dp->can_mst)
3860 return false;
3861
3862 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3863 return false;
3864
0e32b39c
DA
3865 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3866 if (buf[0] & DP_MST_CAP) {
3867 DRM_DEBUG_KMS("Sink is MST capable\n");
3868 intel_dp->is_mst = true;
3869 } else {
3870 DRM_DEBUG_KMS("Sink is not MST capable\n");
3871 intel_dp->is_mst = false;
3872 }
3873 }
0e32b39c
DA
3874
3875 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3876 return intel_dp->is_mst;
3877}
3878
d2e216d0
RV
3879int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3880{
3881 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3882 struct drm_device *dev = intel_dig_port->base.base.dev;
3883 struct intel_crtc *intel_crtc =
3884 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3885 u8 buf;
3886 int test_crc_count;
3887 int attempts = 6;
d2e216d0 3888
ad9dc91b 3889 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3890 return -EIO;
d2e216d0 3891
ad9dc91b 3892 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3893 return -ENOTTY;
3894
1dda5f93
RV
3895 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3896 return -EIO;
3897
9d1a1031 3898 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3899 buf | DP_TEST_SINK_START) < 0)
bda0381e 3900 return -EIO;
d2e216d0 3901
1dda5f93 3902 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3903 return -EIO;
ad9dc91b 3904 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3905
ad9dc91b 3906 do {
1dda5f93
RV
3907 if (drm_dp_dpcd_readb(&intel_dp->aux,
3908 DP_TEST_SINK_MISC, &buf) < 0)
3909 return -EIO;
ad9dc91b
RV
3910 intel_wait_for_vblank(dev, intel_crtc->pipe);
3911 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3912
3913 if (attempts == 0) {
90bd1f46
DV
3914 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3915 return -ETIMEDOUT;
ad9dc91b 3916 }
d2e216d0 3917
9d1a1031 3918 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3919 return -EIO;
d2e216d0 3920
1dda5f93
RV
3921 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3922 return -EIO;
3923 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3924 buf & ~DP_TEST_SINK_START) < 0)
3925 return -EIO;
ce31d9f4 3926
d2e216d0
RV
3927 return 0;
3928}
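The heart of the sink-CRC helper above is a bounded poll: wait one vblank per attempt and give up after six attempts if the TEST_SINK_MISC count never changes. A sketch of that pattern alone, with stub callbacks for the DPCD read and the vblank wait (names are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>

static bool sketch_wait_for_new_crc(uint8_t old_count,
                                    uint8_t (*read_test_count)(void),
                                    void (*wait_for_vblank)(void))
{
        int attempts = 6;

        do {
                wait_for_vblank();
        } while (--attempts && read_test_count() == old_count);

        return attempts != 0;   /* false: the sink never produced a fresh CRC */
}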
3929
a60f0e38
JB
3930static bool
3931intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3932{
9d1a1031
JN
3933 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_DEVICE_SERVICE_IRQ_VECTOR,
3935 sink_irq_vector, 1) == 1;
a60f0e38
JB
3936}
3937
0e32b39c
DA
3938static bool
3939intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3940{
3941 int ret;
3942
3943 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3944 DP_SINK_COUNT_ESI,
3945 sink_irq_vector, 14);
3946 if (ret != 14)
3947 return false;
3948
3949 return true;
3950}
3951
a60f0e38
JB
3952static void
3953intel_dp_handle_test_request(struct intel_dp *intel_dp)
3954{
3955 /* NAK by default */
9d1a1031 3956 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3957}
3958
0e32b39c
DA
3959static int
3960intel_dp_check_mst_status(struct intel_dp *intel_dp)
3961{
3962 bool bret;
3963
3964 if (intel_dp->is_mst) {
3965 u8 esi[16] = { 0 };
3966 int ret = 0;
3967 int retry;
3968 bool handled;
3969 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3970go_again:
3971 if (bret == true) {
3972
3973 /* check link status - esi[10] = 0x200c */
3974 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3975 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3976 intel_dp_start_link_train(intel_dp);
3977 intel_dp_complete_link_train(intel_dp);
3978 intel_dp_stop_link_train(intel_dp);
3979 }
3980
6f34cc39 3981 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3982 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3983
3984 if (handled) {
3985 for (retry = 0; retry < 3; retry++) {
3986 int wret;
3987 wret = drm_dp_dpcd_write(&intel_dp->aux,
3988 DP_SINK_COUNT_ESI+1,
3989 &esi[1], 3);
3990 if (wret == 3) {
3991 break;
3992 }
3993 }
3994
3995 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3996 if (bret == true) {
6f34cc39 3997 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3998 goto go_again;
3999 }
4000 } else
4001 ret = 0;
4002
4003 return ret;
4004 } else {
4005 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4006 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4007 intel_dp->is_mst = false;
4008 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4009 /* send a hotplug event */
4010 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4011 }
4012 }
4013 return -EINVAL;
4014}
4015
a4fc5ed6
KP
4016/*
4017 * According to DP spec
4018 * 5.1.2:
4019 * 1. Read DPCD
4020 * 2. Configure link according to Receiver Capabilities
4021 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4022 * 4. Check link status on receipt of hot-plug interrupt
4023 */
a5146200 4024static void
ea5b213a 4025intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4026{
5b215bcf 4027 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4028 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4029 u8 sink_irq_vector;
93f62dad 4030 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4031
5b215bcf
DA
4032 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4033
da63a9f2 4034 if (!intel_encoder->connectors_active)
d2b996ac 4035 return;
59cd09e1 4036
da63a9f2 4037 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4038 return;
4039
1a125d8a
ID
4040 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4041 return;
4042
92fd8fd1 4043 /* Try to read receiver status if the link appears to be up */
93f62dad 4044 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4045 return;
4046 }
4047
92fd8fd1 4048 /* Now read the DPCD to see if it's actually running */
26d61aad 4049 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4050 return;
4051 }
4052
a60f0e38
JB
4053 /* Try to read the source of the interrupt */
4054 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4055 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4056 /* Clear interrupt source */
9d1a1031
JN
4057 drm_dp_dpcd_writeb(&intel_dp->aux,
4058 DP_DEVICE_SERVICE_IRQ_VECTOR,
4059 sink_irq_vector);
a60f0e38
JB
4060
4061 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4062 intel_dp_handle_test_request(intel_dp);
4063 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4064 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4065 }
4066
1ffdff13 4067 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4068 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4069 intel_encoder->base.name);
33a34e4e
JB
4070 intel_dp_start_link_train(intel_dp);
4071 intel_dp_complete_link_train(intel_dp);
3ab9c637 4072 intel_dp_stop_link_train(intel_dp);
33a34e4e 4073 }
a4fc5ed6 4074}
a4fc5ed6 4075
caf9ab24 4076/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4077static enum drm_connector_status
26d61aad 4078intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4079{
caf9ab24 4080 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4081 uint8_t type;
4082
4083 if (!intel_dp_get_dpcd(intel_dp))
4084 return connector_status_disconnected;
4085
4086 /* if there's no downstream port, we're done */
4087 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4088 return connector_status_connected;
caf9ab24
AJ
4089
4090 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4091 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4092 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4093 uint8_t reg;
9d1a1031
JN
4094
4095 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4096 &reg, 1) < 0)
caf9ab24 4097 return connector_status_unknown;
9d1a1031 4098
23235177
AJ
4099 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4100 : connector_status_disconnected;
caf9ab24
AJ
4101 }
4102
4103 /* If no HPD, poke DDC gently */
0b99836f 4104 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4105 return connector_status_connected;
caf9ab24
AJ
4106
4107 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4108 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4109 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4110 if (type == DP_DS_PORT_TYPE_VGA ||
4111 type == DP_DS_PORT_TYPE_NON_EDID)
4112 return connector_status_unknown;
4113 } else {
4114 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4115 DP_DWN_STRM_PORT_TYPE_MASK;
4116 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4117 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4118 return connector_status_unknown;
4119 }
caf9ab24
AJ
4120
4121 /* Anything else is out of spec, warn and ignore */
4122 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4123 return connector_status_disconnected;
71ba9000
AJ
4124}
4125
d410b56d
CW
4126static enum drm_connector_status
4127edp_detect(struct intel_dp *intel_dp)
4128{
4129 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4130 enum drm_connector_status status;
4131
4132 status = intel_panel_detect(dev);
4133 if (status == connector_status_unknown)
4134 status = connector_status_connected;
4135
4136 return status;
4137}
4138
5eb08b69 4139static enum drm_connector_status
a9756bb5 4140ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4141{
30add22d 4142 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4143 struct drm_i915_private *dev_priv = dev->dev_private;
4144 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4145
1b469639
DL
4146 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4147 return connector_status_disconnected;
4148
26d61aad 4149 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4150}
4151
2a592bec
DA
4152static int g4x_digital_port_connected(struct drm_device *dev,
4153 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4154{
a4fc5ed6 4155 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4156 uint32_t bit;
5eb08b69 4157
232a6ee9
TP
4158 if (IS_VALLEYVIEW(dev)) {
4159 switch (intel_dig_port->port) {
4160 case PORT_B:
4161 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4162 break;
4163 case PORT_C:
4164 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4165 break;
4166 case PORT_D:
4167 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4168 break;
4169 default:
2a592bec 4170 return -EINVAL;
232a6ee9
TP
4171 }
4172 } else {
4173 switch (intel_dig_port->port) {
4174 case PORT_B:
4175 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4176 break;
4177 case PORT_C:
4178 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4179 break;
4180 case PORT_D:
4181 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4182 break;
4183 default:
2a592bec 4184 return -EINVAL;
232a6ee9 4185 }
a4fc5ed6
KP
4186 }
4187
10f76a38 4188 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4189 return 0;
4190 return 1;
4191}
4192
4193static enum drm_connector_status
4194g4x_dp_detect(struct intel_dp *intel_dp)
4195{
4196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4197 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4198 int ret;
4199
4200 /* Can't disconnect eDP, but you can close the lid... */
4201 if (is_edp(intel_dp)) {
4202 enum drm_connector_status status;
4203
4204 status = intel_panel_detect(dev);
4205 if (status == connector_status_unknown)
4206 status = connector_status_connected;
4207 return status;
4208 }
4209
4210 ret = g4x_digital_port_connected(dev, intel_dig_port);
4211 if (ret == -EINVAL)
4212 return connector_status_unknown;
4213 else if (ret == 0)
a4fc5ed6
KP
4214 return connector_status_disconnected;
4215
26d61aad 4216 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4217}
4218
8c241fef 4219static struct edid *
beb60608 4220intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4221{
beb60608 4222 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4223
9cd300e0
JN
4224 /* use cached edid if we have one */
4225 if (intel_connector->edid) {
9cd300e0
JN
4226 /* invalid edid */
4227 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4228 return NULL;
4229
55e9edeb 4230 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4231 } else
4232 return drm_get_edid(&intel_connector->base,
4233 &intel_dp->aux.ddc);
4234}
8c241fef 4235
beb60608
CW
4236static void
4237intel_dp_set_edid(struct intel_dp *intel_dp)
4238{
4239 struct intel_connector *intel_connector = intel_dp->attached_connector;
4240 struct edid *edid;
8c241fef 4241
beb60608
CW
4242 edid = intel_dp_get_edid(intel_dp);
4243 intel_connector->detect_edid = edid;
4244
4245 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4246 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4247 else
4248 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4249}
4250
beb60608
CW
4251static void
4252intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4253{
beb60608 4254 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4255
beb60608
CW
4256 kfree(intel_connector->detect_edid);
4257 intel_connector->detect_edid = NULL;
9cd300e0 4258
beb60608
CW
4259 intel_dp->has_audio = false;
4260}
d6f24d0f 4261
beb60608
CW
4262static enum intel_display_power_domain
4263intel_dp_power_get(struct intel_dp *dp)
4264{
4265 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4266 enum intel_display_power_domain power_domain;
4267
4268 power_domain = intel_display_port_power_domain(encoder);
4269 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4270
4271 return power_domain;
4272}
d6f24d0f 4273
beb60608
CW
4274static void
4275intel_dp_power_put(struct intel_dp *dp,
4276 enum intel_display_power_domain power_domain)
4277{
4278 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4279 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4280}
4281
a9756bb5
ZW
4282static enum drm_connector_status
4283intel_dp_detect(struct drm_connector *connector, bool force)
4284{
4285 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4286 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4287 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4288 struct drm_device *dev = connector->dev;
a9756bb5 4289 enum drm_connector_status status;
671dedd2 4290 enum intel_display_power_domain power_domain;
0e32b39c 4291 bool ret;
a9756bb5 4292
164c8598 4293 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4294 connector->base.id, connector->name);
beb60608 4295 intel_dp_unset_edid(intel_dp);
164c8598 4296
0e32b39c
DA
4297 if (intel_dp->is_mst) {
4298 /* MST devices are disconnected from a monitor POV */
4299 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4300 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4301 return connector_status_disconnected;
0e32b39c
DA
4302 }
4303
beb60608 4304 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4305
d410b56d
CW
4306 /* Can't disconnect eDP, but you can close the lid... */
4307 if (is_edp(intel_dp))
4308 status = edp_detect(intel_dp);
4309 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4310 status = ironlake_dp_detect(intel_dp);
4311 else
4312 status = g4x_dp_detect(intel_dp);
4313 if (status != connector_status_connected)
c8c8fb33 4314 goto out;
a9756bb5 4315
0d198328
AJ
4316 intel_dp_probe_oui(intel_dp);
4317
0e32b39c
DA
4318 ret = intel_dp_probe_mst(intel_dp);
4319 if (ret) {
4320 /* if we are in MST mode then this connector
4321 * won't appear connected or have anything with EDID on it */
4322 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4323 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4324 status = connector_status_disconnected;
4325 goto out;
4326 }
4327
beb60608 4328 intel_dp_set_edid(intel_dp);
a9756bb5 4329
d63885da
PZ
4330 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4331 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4332 status = connector_status_connected;
4333
4334out:
beb60608 4335 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4336 return status;
a4fc5ed6
KP
4337}
4338
beb60608
CW
4339static void
4340intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4341{
df0e9248 4342 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4343 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4344 enum intel_display_power_domain power_domain;
a4fc5ed6 4345
beb60608
CW
4346 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4347 connector->base.id, connector->name);
4348 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4349
beb60608
CW
4350 if (connector->status != connector_status_connected)
4351 return;
671dedd2 4352
beb60608
CW
4353 power_domain = intel_dp_power_get(intel_dp);
4354
4355 intel_dp_set_edid(intel_dp);
4356
4357 intel_dp_power_put(intel_dp, power_domain);
4358
4359 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4360 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4361}
4362
4363static int intel_dp_get_modes(struct drm_connector *connector)
4364{
4365 struct intel_connector *intel_connector = to_intel_connector(connector);
4366 struct edid *edid;
4367
4368 edid = intel_connector->detect_edid;
4369 if (edid) {
4370 int ret = intel_connector_update_modes(connector, edid);
4371 if (ret)
4372 return ret;
4373 }
32f9d658 4374
f8779fda 4375 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4376 if (is_edp(intel_attached_dp(connector)) &&
4377 intel_connector->panel.fixed_mode) {
f8779fda 4378 struct drm_display_mode *mode;
beb60608
CW
4379
4380 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4381 intel_connector->panel.fixed_mode);
f8779fda 4382 if (mode) {
32f9d658
ZW
4383 drm_mode_probed_add(connector, mode);
4384 return 1;
4385 }
4386 }
beb60608 4387
32f9d658 4388 return 0;
a4fc5ed6
KP
4389}
4390
1aad7ac0
CW
4391static bool
4392intel_dp_detect_audio(struct drm_connector *connector)
4393{
1aad7ac0 4394 bool has_audio = false;
beb60608 4395 struct edid *edid;
1aad7ac0 4396
beb60608
CW
4397 edid = to_intel_connector(connector)->detect_edid;
4398 if (edid)
1aad7ac0 4399 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4400
1aad7ac0
CW
4401 return has_audio;
4402}
4403
f684960e
CW
4404static int
4405intel_dp_set_property(struct drm_connector *connector,
4406 struct drm_property *property,
4407 uint64_t val)
4408{
e953fd7b 4409 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4410 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4411 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4412 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4413 int ret;
4414
662595df 4415 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4416 if (ret)
4417 return ret;
4418
3f43c48d 4419 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4420 int i = val;
4421 bool has_audio;
4422
4423 if (i == intel_dp->force_audio)
f684960e
CW
4424 return 0;
4425
1aad7ac0 4426 intel_dp->force_audio = i;
f684960e 4427
c3e5f67b 4428 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4429 has_audio = intel_dp_detect_audio(connector);
4430 else
c3e5f67b 4431 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4432
4433 if (has_audio == intel_dp->has_audio)
f684960e
CW
4434 return 0;
4435
1aad7ac0 4436 intel_dp->has_audio = has_audio;
f684960e
CW
4437 goto done;
4438 }
4439
e953fd7b 4440 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4441 bool old_auto = intel_dp->color_range_auto;
4442 uint32_t old_range = intel_dp->color_range;
4443
55bc60db
VS
4444 switch (val) {
4445 case INTEL_BROADCAST_RGB_AUTO:
4446 intel_dp->color_range_auto = true;
4447 break;
4448 case INTEL_BROADCAST_RGB_FULL:
4449 intel_dp->color_range_auto = false;
4450 intel_dp->color_range = 0;
4451 break;
4452 case INTEL_BROADCAST_RGB_LIMITED:
4453 intel_dp->color_range_auto = false;
4454 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4455 break;
4456 default:
4457 return -EINVAL;
4458 }
ae4edb80
DV
4459
4460 if (old_auto == intel_dp->color_range_auto &&
4461 old_range == intel_dp->color_range)
4462 return 0;
4463
e953fd7b
CW
4464 goto done;
4465 }
4466
53b41837
YN
4467 if (is_edp(intel_dp) &&
4468 property == connector->dev->mode_config.scaling_mode_property) {
4469 if (val == DRM_MODE_SCALE_NONE) {
4470 DRM_DEBUG_KMS("no scaling not supported\n");
4471 return -EINVAL;
4472 }
4473
4474 if (intel_connector->panel.fitting_mode == val) {
4475 /* the eDP scaling property is not changed */
4476 return 0;
4477 }
4478 intel_connector->panel.fitting_mode = val;
4479
4480 goto done;
4481 }
4482
f684960e
CW
4483 return -EINVAL;
4484
4485done:
c0c36b94
CW
4486 if (intel_encoder->base.crtc)
4487 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4488
4489 return 0;
4490}
4491
a4fc5ed6 4492static void
73845adf 4493intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4494{
1d508706 4495 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4496
10e972d3 4497 kfree(intel_connector->detect_edid);
beb60608 4498
9cd300e0
JN
4499 if (!IS_ERR_OR_NULL(intel_connector->edid))
4500 kfree(intel_connector->edid);
4501
acd8db10
PZ
4502 /* Can't call is_edp() since the encoder may have been destroyed
4503 * already. */
4504 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4505 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4506
a4fc5ed6 4507 drm_connector_cleanup(connector);
55f78c43 4508 kfree(connector);
a4fc5ed6
KP
4509}
4510
00c09d70 4511void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4512{
da63a9f2
PZ
4513 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4514 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4515
4f71d0cb 4516 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4517 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4518 if (is_edp(intel_dp)) {
4519 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4520 /*
4521 * vdd might still be enabled due to the delayed vdd off.
4522 * Make sure vdd is actually turned off here.
4523 */
773538e8 4524 pps_lock(intel_dp);
4be73780 4525 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4526 pps_unlock(intel_dp);
4527
01527b31
CT
4528 if (intel_dp->edp_notifier.notifier_call) {
4529 unregister_reboot_notifier(&intel_dp->edp_notifier);
4530 intel_dp->edp_notifier.notifier_call = NULL;
4531 }
bd943159 4532 }
c8bd0e49 4533 drm_encoder_cleanup(encoder);
da63a9f2 4534 kfree(intel_dig_port);
24d05927
DV
4535}
4536
07f9cd0b
ID
4537static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4538{
4539 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4540
4541 if (!is_edp(intel_dp))
4542 return;
4543
951468f3
VS
4544 /*
4545 * vdd might still be enabled due to the delayed vdd off.
4546 * Make sure vdd is actually turned off here.
4547 */
afa4e53a 4548 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4549 pps_lock(intel_dp);
07f9cd0b 4550 edp_panel_vdd_off_sync(intel_dp);
773538e8 4551 pps_unlock(intel_dp);
07f9cd0b
ID
4552}
4553
49e6bc51
VS
4554static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4555{
4556 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4557 struct drm_device *dev = intel_dig_port->base.base.dev;
4558 struct drm_i915_private *dev_priv = dev->dev_private;
4559 enum intel_display_power_domain power_domain;
4560
4561 lockdep_assert_held(&dev_priv->pps_mutex);
4562
4563 if (!edp_have_panel_vdd(intel_dp))
4564 return;
4565
4566 /*
4567 * The VDD bit needs a power domain reference, so if the bit is
4568 * already enabled when we boot or resume, grab this reference and
4569 * schedule a vdd off, so we don't hold on to the reference
4570 * indefinitely.
4571 */
4572 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4573 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4574 intel_display_power_get(dev_priv, power_domain);
4575
4576 edp_panel_vdd_schedule_off(intel_dp);
4577}
4578
6d93c0c4
ID
4579static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4580{
49e6bc51
VS
4581 struct intel_dp *intel_dp;
4582
4583 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4584 return;
4585
4586 intel_dp = enc_to_intel_dp(encoder);
4587
4588 pps_lock(intel_dp);
4589
4590 /*
4591 * Read out the current power sequencer assignment,
4592 * in case the BIOS did something with it.
4593 */
4594 if (IS_VALLEYVIEW(encoder->dev))
4595 vlv_initial_power_sequencer_setup(intel_dp);
4596
4597 intel_edp_panel_vdd_sanitize(intel_dp);
4598
4599 pps_unlock(intel_dp);
6d93c0c4
ID
4600}
4601
a4fc5ed6 4602static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4603 .dpms = intel_connector_dpms,
a4fc5ed6 4604 .detect = intel_dp_detect,
beb60608 4605 .force = intel_dp_force,
a4fc5ed6 4606 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4607 .set_property = intel_dp_set_property,
2545e4a6 4608 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4609 .destroy = intel_dp_connector_destroy,
c6f95f27 4610 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4611 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4612};
4613
4614static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4615 .get_modes = intel_dp_get_modes,
4616 .mode_valid = intel_dp_mode_valid,
df0e9248 4617 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4618};
4619
a4fc5ed6 4620static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4621 .reset = intel_dp_encoder_reset,
24d05927 4622 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4623};
4624
0e32b39c 4625void
21d40d37 4626intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4627{
0e32b39c 4628 return;
c8110e52 4629}
6207937d 4630
b2c5c181 4631enum irqreturn
13cf5504
DA
4632intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4633{
4634 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4635 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4636 struct drm_device *dev = intel_dig_port->base.base.dev;
4637 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4638 enum intel_display_power_domain power_domain;
b2c5c181 4639 enum irqreturn ret = IRQ_NONE;
1c767b33 4640
0e32b39c
DA
4641 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4642 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4643
7a7f84cc
VS
4644 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4645 /*
4646 * vdd off can generate a long pulse on eDP which
4647 * would require vdd on to handle it, and thus we
4648 * would end up in an endless cycle of
4649 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4650 */
4651 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4652 port_name(intel_dig_port->port));
a8b3d52f 4653 return IRQ_HANDLED;
7a7f84cc
VS
4654 }
4655
26fbb774
VS
4656 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4657 port_name(intel_dig_port->port),
0e32b39c 4658 long_hpd ? "long" : "short");
13cf5504 4659
1c767b33
ID
4660 power_domain = intel_display_port_power_domain(intel_encoder);
4661 intel_display_power_get(dev_priv, power_domain);
4662
0e32b39c 4663 if (long_hpd) {
2a592bec
DA
4664
4665 if (HAS_PCH_SPLIT(dev)) {
4666 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4667 goto mst_fail;
4668 } else {
4669 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4670 goto mst_fail;
4671 }
0e32b39c
DA
4672
4673 if (!intel_dp_get_dpcd(intel_dp)) {
4674 goto mst_fail;
4675 }
4676
4677 intel_dp_probe_oui(intel_dp);
4678
4679 if (!intel_dp_probe_mst(intel_dp))
4680 goto mst_fail;
4681
4682 } else {
4683 if (intel_dp->is_mst) {
1c767b33 4684 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4685 goto mst_fail;
4686 }
4687
4688 if (!intel_dp->is_mst) {
4689 /*
4690 * we'll check the link status via the normal hot plug path later -
4691 * but for short hpds we should check it now
4692 */
5b215bcf 4693 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4694 intel_dp_check_link_status(intel_dp);
5b215bcf 4695 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4696 }
4697 }
b2c5c181
DV
4698
4699 ret = IRQ_HANDLED;
4700
1c767b33 4701 goto put_power;
0e32b39c
DA
4702mst_fail:
4703 /* if we were in MST mode and the device is no longer there, get out of MST mode */
4704 if (intel_dp->is_mst) {
4705 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4706 intel_dp->is_mst = false;
4707 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4708 }
1c767b33
ID
4709put_power:
4710 intel_display_power_put(dev_priv, power_domain);
4711
4712 return ret;
13cf5504
DA
4713}
4714
e3421a18
ZW
4715/* Return which DP Port should be selected for Transcoder DP control */
4716int
0206e353 4717intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4718{
4719 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4720 struct intel_encoder *intel_encoder;
4721 struct intel_dp *intel_dp;
e3421a18 4722
fa90ecef
PZ
4723 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4724 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4725
fa90ecef
PZ
4726 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4727 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4728 return intel_dp->output_reg;
e3421a18 4729 }
ea5b213a 4730
e3421a18
ZW
4731 return -1;
4732}
4733
36e83a18 4734/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4735bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4736{
4737 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4738 union child_device_config *p_child;
36e83a18 4739 int i;
5d8a7752
VS
4740 static const short port_mapping[] = {
4741 [PORT_B] = PORT_IDPB,
4742 [PORT_C] = PORT_IDPC,
4743 [PORT_D] = PORT_IDPD,
4744 };
36e83a18 4745
3b32a35b
VS
4746 if (port == PORT_A)
4747 return true;
4748
41aa3448 4749 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4750 return false;
4751
41aa3448
RV
4752 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4753 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4754
5d8a7752 4755 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4756 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4757 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4758 return true;
4759 }
4760 return false;
4761}
4762
0e32b39c 4763void
f684960e
CW
4764intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4765{
53b41837
YN
4766 struct intel_connector *intel_connector = to_intel_connector(connector);
4767
3f43c48d 4768 intel_attach_force_audio_property(connector);
e953fd7b 4769 intel_attach_broadcast_rgb_property(connector);
55bc60db 4770 intel_dp->color_range_auto = true;
53b41837
YN
4771
4772 if (is_edp(intel_dp)) {
4773 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4774 drm_object_attach_property(
4775 &connector->base,
53b41837 4776 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4777 DRM_MODE_SCALE_ASPECT);
4778 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4779 }
f684960e
CW
4780}
4781
dada1a9f
ID
4782static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4783{
4784 intel_dp->last_power_cycle = jiffies;
4785 intel_dp->last_power_on = jiffies;
4786 intel_dp->last_backlight_off = jiffies;
4787}
4788
67a54566
DV
4789static void
4790intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4791 struct intel_dp *intel_dp)
67a54566
DV
4792{
4793 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4794 struct edp_power_seq cur, vbt, spec,
4795 *final = &intel_dp->pps_delays;
67a54566 4796 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4797 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4798
e39b999a
VS
4799 lockdep_assert_held(&dev_priv->pps_mutex);
4800
81ddbc69
VS
4801 /* already initialized? */
4802 if (final->t11_t12 != 0)
4803 return;
4804
453c5420 4805 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4806 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4807 pp_on_reg = PCH_PP_ON_DELAYS;
4808 pp_off_reg = PCH_PP_OFF_DELAYS;
4809 pp_div_reg = PCH_PP_DIVISOR;
4810 } else {
bf13e81b
JN
4811 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4812
4813 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4814 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4815 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4816 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4817 }
67a54566
DV
4818
4819 /* Workaround: Need to write PP_CONTROL with the unlock key as
4820 * the very first thing. */
453c5420 4821 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4822 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4823
453c5420
JB
4824 pp_on = I915_READ(pp_on_reg);
4825 pp_off = I915_READ(pp_off_reg);
4826 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4827
4828 /* Pull timing values out of registers */
4829 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4830 PANEL_POWER_UP_DELAY_SHIFT;
4831
4832 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4833 PANEL_LIGHT_ON_DELAY_SHIFT;
4834
4835 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4836 PANEL_LIGHT_OFF_DELAY_SHIFT;
4837
4838 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4839 PANEL_POWER_DOWN_DELAY_SHIFT;
4840
4841 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4842 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4843
4844 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4845 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4846
41aa3448 4847 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4848
4849 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4850 * our hw here, which are all in 100usec. */
4851 spec.t1_t3 = 210 * 10;
4852 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4853 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4854 spec.t10 = 500 * 10;
4855 /* This one is special and actually in units of 100ms, but zero
4856 * based in the hw (so we need to add 100 ms). But the sw vbt
4857 * table multiplies it by 1000 to make it in units of 100usec,
4858 * too. */
4859 spec.t11_t12 = (510 + 100) * 10;
4860
4861 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4862 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4863
4864 /* Use the max of the register settings and vbt. If both are
4865 * unset, fall back to the spec limits. */
36b5f425 4866#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4867 spec.field : \
4868 max(cur.field, vbt.field))
4869 assign_final(t1_t3);
4870 assign_final(t8);
4871 assign_final(t9);
4872 assign_final(t10);
4873 assign_final(t11_t12);
4874#undef assign_final
4875
36b5f425 4876#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4877 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4878 intel_dp->backlight_on_delay = get_delay(t8);
4879 intel_dp->backlight_off_delay = get_delay(t9);
4880 intel_dp->panel_power_down_delay = get_delay(t10);
4881 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4882#undef get_delay
4883
f30d26e4
JN
4884 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4885 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4886 intel_dp->panel_power_cycle_delay);
4887
4888 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4889 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4890}
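To make the cur/VBT/spec fallback above concrete: each delay is taken as the larger of the value already programmed in hardware and the VBT value, the spec limit is used only when both are zero, and the result is converted from the hardware's 100usec units to milliseconds, rounding up. A small self-contained sketch mirroring assign_final()/get_delay() (illustrative only, not driver code):

/*
 * resolve_delay_ms(): hypothetical helper. All inputs are in 100usec
 * units; the return value is in ms.
 */
static int resolve_delay_ms(int cur, int vbt, int spec)
{
	int raw = cur > vbt ? cur : vbt;	/* prefer hw/VBT settings */

	if (raw == 0)				/* both unset: fall back to spec */
		raw = spec;

	return (raw + 9) / 10;			/* 100usec units -> ms, round up */
}

For example, with cur.t1_t3 = 0, vbt.t1_t3 = 0 and the eDP 1.3 limit of 2100 (210 * 10), resolve_delay_ms(0, 0, 2100) yields a 210 ms panel power up delay.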
4891
4892static void
4893intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4894 struct intel_dp *intel_dp)
f30d26e4
JN
4895{
4896 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4897 u32 pp_on, pp_off, pp_div, port_sel = 0;
4898 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4899 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4900 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4901 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4902
e39b999a 4903 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4904
4905 if (HAS_PCH_SPLIT(dev)) {
4906 pp_on_reg = PCH_PP_ON_DELAYS;
4907 pp_off_reg = PCH_PP_OFF_DELAYS;
4908 pp_div_reg = PCH_PP_DIVISOR;
4909 } else {
bf13e81b
JN
4910 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4911
4912 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4913 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4914 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4915 }
4916
b2f19d1a
PZ
4917 /*
4918 * And finally store the new values in the power sequencer. The
4919 * backlight delays are set to 1 because we do manual waits on them. For
4920 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4921 * we'll end up waiting for the backlight off delay twice: once when we
4922 * do the manual sleep, and once when we disable the panel and wait for
4923 * the PP_STATUS bit to become zero.
4924 */
f30d26e4 4925 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4926 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4927 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4928 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4929 /* Compute the divisor for the pp clock, simply match the Bspec
4930 * formula. */
453c5420 4931 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4932 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4933 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4934
4935 /* Haswell doesn't have any port selection bits for the panel
4936 * power sequencer any more. */
bc7d38a4 4937 if (IS_VALLEYVIEW(dev)) {
ad933b56 4938 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4939 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4940 if (port == PORT_A)
a24c144c 4941 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4942 else
a24c144c 4943 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4944 }
4945
453c5420
JB
4946 pp_on |= port_sel;
4947
4948 I915_WRITE(pp_on_reg, pp_on);
4949 I915_WRITE(pp_off_reg, pp_off);
4950 I915_WRITE(pp_div_reg, pp_div);
67a54566 4951
67a54566 4952 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4953 I915_READ(pp_on_reg),
4954 I915_READ(pp_off_reg),
4955 I915_READ(pp_div_reg));
f684960e
CW
4956}
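As a worked example of the PP_DIVISOR packing above: the reference divider field is (100 * div) / 2 - 1 and the power cycle field is t11_t12 rounded up to 100 ms units. The sketch below is standalone and illustrative only; the shift values are assumed to match i915_reg.h and the 125 MHz raw clock is a made-up input.

/* Standalone sketch of the Bspec pp clock divisor formula. */
#include <stdio.h>

#define PP_REFERENCE_DIVIDER_SHIFT	8	/* assumed */
#define PANEL_POWER_CYCLE_DELAY_SHIFT	0	/* assumed */

int main(void)
{
	unsigned int div = 125;		/* raw clock in MHz (assumed) */
	unsigned int t11_t12 = 6100;	/* 610 ms, in 100usec units */
	unsigned int pp_div;

	pp_div = ((100 * div) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= ((t11_t12 + 999) / 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT;

	/* prints 0x186907: 6249 in the divider field, 7 in the cycle field */
	printf("PP_DIVISOR = %#x\n", pp_div);
	return 0;
}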
4957
b33a2815
VK
4958/**
4959 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4960 * @dev: DRM device
4961 * @refresh_rate: RR to be programmed
4962 *
4963 * This function gets called when refresh rate (RR) has to be changed from
4964 * one frequency to another. Switches can be between high and low RR
4965 * supported by the panel or to any other RR based on media playback (in
4966 * this case, RR value needs to be passed from user space).
4967 *
4968 * The caller of this function needs to take a lock on dev_priv->drrs.
4969 */
96178eeb 4970static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4971{
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4973 struct intel_encoder *encoder;
96178eeb
VK
4974 struct intel_digital_port *dig_port = NULL;
4975 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4976 struct intel_crtc_state *config = NULL;
439d7ac0 4977 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4978 u32 reg, val;
96178eeb 4979 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4980
4981 if (refresh_rate <= 0) {
4982 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4983 return;
4984 }
4985
96178eeb
VK
4986 if (intel_dp == NULL) {
4987 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4988 return;
4989 }
4990
1fcc9d1c 4991 /*
e4d59f6b
RV
4992 * FIXME: This needs proper synchronization with psr state for some
4993 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4994 */
439d7ac0 4995
96178eeb
VK
4996 dig_port = dp_to_dig_port(intel_dp);
4997 encoder = &dig_port->base;
723f9aab 4998 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
4999
5000 if (!intel_crtc) {
5001 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5002 return;
5003 }
5004
6e3c9717 5005 config = intel_crtc->config;
439d7ac0 5006
96178eeb 5007 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5008 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5009 return;
5010 }
5011
96178eeb
VK
5012 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5013 refresh_rate)
439d7ac0
PB
5014 index = DRRS_LOW_RR;
5015
96178eeb 5016 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5017 DRM_DEBUG_KMS(
5018 "DRRS requested for previously set RR...ignoring\n");
5019 return;
5020 }
5021
5022 if (!intel_crtc->active) {
5023 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5024 return;
5025 }
5026
44395bfe 5027 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5028 switch (index) {
5029 case DRRS_HIGH_RR:
5030 intel_dp_set_m_n(intel_crtc, M1_N1);
5031 break;
5032 case DRRS_LOW_RR:
5033 intel_dp_set_m_n(intel_crtc, M2_N2);
5034 break;
5035 case DRRS_MAX_RR:
5036 default:
5037			DRM_ERROR("Unsupported refresh rate type\n");
5038 }
5039 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5040 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5041 val = I915_READ(reg);
a4c30b1d 5042
439d7ac0 5043 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5044 if (IS_VALLEYVIEW(dev))
5045 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5046 else
5047 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5048 } else {
6fa7aec1
VK
5049 if (IS_VALLEYVIEW(dev))
5050 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5051 else
5052 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5053 }
5054 I915_WRITE(reg, val);
5055 }
5056
4e9ac947
VK
5057 dev_priv->drrs.refresh_rate_type = index;
5058
5059 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5060}
5061
b33a2815
VK
5062/**
5063 * intel_edp_drrs_enable - init drrs struct if supported
5064 * @intel_dp: DP struct
5065 *
5066 * Initializes frontbuffer_bits and drrs.dp
5067 */
c395578e
VK
5068void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5069{
5070 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5071 struct drm_i915_private *dev_priv = dev->dev_private;
5072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5073 struct drm_crtc *crtc = dig_port->base.base.crtc;
5074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5075
5076 if (!intel_crtc->config->has_drrs) {
5077 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5078 return;
5079 }
5080
5081 mutex_lock(&dev_priv->drrs.mutex);
5082 if (WARN_ON(dev_priv->drrs.dp)) {
5083 DRM_ERROR("DRRS already enabled\n");
5084 goto unlock;
5085 }
5086
5087 dev_priv->drrs.busy_frontbuffer_bits = 0;
5088
5089 dev_priv->drrs.dp = intel_dp;
5090
5091unlock:
5092 mutex_unlock(&dev_priv->drrs.mutex);
5093}
5094
b33a2815
VK
5095/**
5096 * intel_edp_drrs_disable - Disable DRRS
5097 * @intel_dp: DP struct
5098 *
5099 */
c395578e
VK
5100void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5101{
5102 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5103 struct drm_i915_private *dev_priv = dev->dev_private;
5104 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5105 struct drm_crtc *crtc = dig_port->base.base.crtc;
5106 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5107
5108 if (!intel_crtc->config->has_drrs)
5109 return;
5110
5111 mutex_lock(&dev_priv->drrs.mutex);
5112 if (!dev_priv->drrs.dp) {
5113 mutex_unlock(&dev_priv->drrs.mutex);
5114 return;
5115 }
5116
5117 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5118 intel_dp_set_drrs_state(dev_priv->dev,
5119 intel_dp->attached_connector->panel.
5120 fixed_mode->vrefresh);
5121
5122 dev_priv->drrs.dp = NULL;
5123 mutex_unlock(&dev_priv->drrs.mutex);
5124
5125 cancel_delayed_work_sync(&dev_priv->drrs.work);
5126}
5127
4e9ac947
VK
5128static void intel_edp_drrs_downclock_work(struct work_struct *work)
5129{
5130 struct drm_i915_private *dev_priv =
5131 container_of(work, typeof(*dev_priv), drrs.work.work);
5132 struct intel_dp *intel_dp;
5133
5134 mutex_lock(&dev_priv->drrs.mutex);
5135
5136 intel_dp = dev_priv->drrs.dp;
5137
5138 if (!intel_dp)
5139 goto unlock;
5140
439d7ac0 5141 /*
4e9ac947
VK
5142 * The delayed work can race with an invalidate hence we need to
5143 * recheck.
439d7ac0
PB
5144 */
5145
4e9ac947
VK
5146 if (dev_priv->drrs.busy_frontbuffer_bits)
5147 goto unlock;
439d7ac0 5148
4e9ac947
VK
5149 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5150 intel_dp_set_drrs_state(dev_priv->dev,
5151 intel_dp->attached_connector->panel.
5152 downclock_mode->vrefresh);
439d7ac0 5153
4e9ac947 5154unlock:
4e9ac947 5155 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5156}
5157
b33a2815
VK
5158/**
5159 * intel_edp_drrs_invalidate - Invalidate DRRS
5160 * @dev: DRM device
5161 * @frontbuffer_bits: frontbuffer plane tracking bits
5162 *
5163 * When there is a disturbance on screen (due to cursor movement/time
5164 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5165 * high RR.
5166 *
5167 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5168 */
a93fad0f
VK
5169void intel_edp_drrs_invalidate(struct drm_device *dev,
5170 unsigned frontbuffer_bits)
5171{
5172 struct drm_i915_private *dev_priv = dev->dev_private;
5173 struct drm_crtc *crtc;
5174 enum pipe pipe;
5175
9da7d693 5176 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5177 return;
5178
88f933a8 5179 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5180
a93fad0f 5181 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5182 if (!dev_priv->drrs.dp) {
5183 mutex_unlock(&dev_priv->drrs.mutex);
5184 return;
5185 }
5186
a93fad0f
VK
5187 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5188 pipe = to_intel_crtc(crtc)->pipe;
5189
5190 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5191 intel_dp_set_drrs_state(dev_priv->dev,
5192 dev_priv->drrs.dp->attached_connector->panel.
5193 fixed_mode->vrefresh);
5194 }
5195
5196 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5197
5198 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5199 mutex_unlock(&dev_priv->drrs.mutex);
5200}
5201
b33a2815
VK
5202/**
5203 * intel_edp_drrs_flush - Flush DRRS
5204 * @dev: DRM device
5205 * @frontbuffer_bits: frontbuffer plane tracking bits
5206 *
5207 * When there is no movement on screen, DRRS work can be scheduled.
5208 * This DRRS work is responsible for setting relevant registers after a
5209 * timeout of 1 second.
5210 *
5211 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5212 */
a93fad0f
VK
5213void intel_edp_drrs_flush(struct drm_device *dev,
5214 unsigned frontbuffer_bits)
5215{
5216 struct drm_i915_private *dev_priv = dev->dev_private;
5217 struct drm_crtc *crtc;
5218 enum pipe pipe;
5219
9da7d693 5220 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5221 return;
5222
88f933a8 5223 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5224
a93fad0f 5225 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5226 if (!dev_priv->drrs.dp) {
5227 mutex_unlock(&dev_priv->drrs.mutex);
5228 return;
5229 }
5230
a93fad0f
VK
5231 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5232 pipe = to_intel_crtc(crtc)->pipe;
5233 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5234
a93fad0f
VK
5235 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5236 !dev_priv->drrs.busy_frontbuffer_bits)
5237 schedule_delayed_work(&dev_priv->drrs.work,
5238 msecs_to_jiffies(1000));
5239 mutex_unlock(&dev_priv->drrs.mutex);
5240}
5241
b33a2815
VK
5242/**
5243 * DOC: Display Refresh Rate Switching (DRRS)
5244 *
5245 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5246 * which enables switching between low and high refresh rates,
5247 * dynamically, based on the usage scenario. This feature is applicable
5248 * for internal panels.
5249 *
5250 * Indication that the panel supports DRRS is given by the panel EDID, which
5251 * would list multiple refresh rates for one resolution.
5252 *
5253 * DRRS is of 2 types - static and seamless.
5254 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5255 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5256 * Seamless DRRS involves changing RR without any visual effect to the user
5257 * and can be used during normal system usage. This is done by programming
5258 * certain registers.
5259 *
5260 * Support for static/seamless DRRS may be indicated in the VBT based on
5261 * inputs from the panel spec.
5262 *
5263 * DRRS saves power by switching to low RR based on usage scenarios.
5264 *
5265 * eDP DRRS:-
5266 * The implementation is based on frontbuffer tracking implementation.
5267 * When there is a disturbance on the screen triggered by user activity or a
5268 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5269 * When there is no movement on screen, after a timeout of 1 second, a switch
5270 * to low RR is made.
5271 * For integration with frontbuffer tracking code,
5272 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5273 *
5274 * DRRS can be further extended to support other internal panels and also
5275 * the scenario of video playback wherein RR is set based on the rate
5276 * requested by userspace.
5277 */
5278
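A minimal caller sketch of the frontbuffer integration described above (hypothetical caller, illustrative only; intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are the real entry points): invalidate before dirtying the frontbuffer so the panel returns to high RR, then flush once the rendering has landed so the 1 second downclock timer can be re-armed.

/* Hypothetical frontbuffer write path exercising the DRRS hooks. */
static void example_frontbuffer_write(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* about to dirty the frontbuffer: force high RR */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... render to the frontbuffer ... */

	/* rendering flushed to memory: allow the delayed switch to low RR */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}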
5279/**
5280 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5281 * @intel_connector: eDP connector
5282 * @fixed_mode: preferred mode of panel
5283 *
5284 * This function is called only once at driver load to initialize basic
5285 * DRRS stuff.
5286 *
5287 * Returns:
5288 * Downclock mode if panel supports it, else return NULL.
5289 * DRRS support is determined by the presence of downclock mode (apart
5290 * from VBT setting).
5291 */
4f9db5b5 5292static struct drm_display_mode *
96178eeb
VK
5293intel_dp_drrs_init(struct intel_connector *intel_connector,
5294 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5295{
5296 struct drm_connector *connector = &intel_connector->base;
96178eeb 5297 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5298 struct drm_i915_private *dev_priv = dev->dev_private;
5299 struct drm_display_mode *downclock_mode = NULL;
5300
9da7d693
DV
5301 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5302 mutex_init(&dev_priv->drrs.mutex);
5303
4f9db5b5
PB
5304 if (INTEL_INFO(dev)->gen <= 6) {
5305 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5306 return NULL;
5307 }
5308
5309 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5310 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5311 return NULL;
5312 }
5313
5314 downclock_mode = intel_find_panel_downclock
5315 (dev, fixed_mode, connector);
5316
5317 if (!downclock_mode) {
a1d26342 5318 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5319 return NULL;
5320 }
5321
96178eeb 5322 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5323
96178eeb 5324 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5325 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5326 return downclock_mode;
5327}
5328
ed92f0b2 5329static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5330 struct intel_connector *intel_connector)
ed92f0b2
PZ
5331{
5332 struct drm_connector *connector = &intel_connector->base;
5333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5334 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5335 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5336 struct drm_i915_private *dev_priv = dev->dev_private;
5337 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5338 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5339 bool has_dpcd;
5340 struct drm_display_mode *scan;
5341 struct edid *edid;
6517d273 5342 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5343
5344 if (!is_edp(intel_dp))
5345 return true;
5346
49e6bc51
VS
5347 pps_lock(intel_dp);
5348 intel_edp_panel_vdd_sanitize(intel_dp);
5349 pps_unlock(intel_dp);
63635217 5350
ed92f0b2 5351 /* Cache DPCD and EDID for edp. */
ed92f0b2 5352 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5353
5354 if (has_dpcd) {
5355 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5356 dev_priv->no_aux_handshake =
5357 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5358 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5359 } else {
5360 /* if this fails, presume the device is a ghost */
5361 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5362 return false;
5363 }
5364
5365 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5366 pps_lock(intel_dp);
36b5f425 5367 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5368 pps_unlock(intel_dp);
ed92f0b2 5369
060c8778 5370 mutex_lock(&dev->mode_config.mutex);
0b99836f 5371 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5372 if (edid) {
5373 if (drm_add_edid_modes(connector, edid)) {
5374 drm_mode_connector_update_edid_property(connector,
5375 edid);
5376 drm_edid_to_eld(connector, edid);
5377 } else {
5378 kfree(edid);
5379 edid = ERR_PTR(-EINVAL);
5380 }
5381 } else {
5382 edid = ERR_PTR(-ENOENT);
5383 }
5384 intel_connector->edid = edid;
5385
5386 /* prefer fixed mode from EDID if available */
5387 list_for_each_entry(scan, &connector->probed_modes, head) {
5388 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5389 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5390 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5391 intel_connector, fixed_mode);
ed92f0b2
PZ
5392 break;
5393 }
5394 }
5395
5396 /* fallback to VBT if available for eDP */
5397 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5398 fixed_mode = drm_mode_duplicate(dev,
5399 dev_priv->vbt.lfp_lvds_vbt_mode);
5400 if (fixed_mode)
5401 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5402 }
060c8778 5403 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5404
01527b31
CT
5405 if (IS_VALLEYVIEW(dev)) {
5406 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5407 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5408
5409 /*
5410 * Figure out the current pipe for the initial backlight setup.
5411 * If the current pipe isn't valid, try the PPS pipe, and if that
5412 * fails just assume pipe A.
5413 */
5414 if (IS_CHERRYVIEW(dev))
5415 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5416 else
5417 pipe = PORT_TO_PIPE(intel_dp->DP);
5418
5419 if (pipe != PIPE_A && pipe != PIPE_B)
5420 pipe = intel_dp->pps_pipe;
5421
5422 if (pipe != PIPE_A && pipe != PIPE_B)
5423 pipe = PIPE_A;
5424
5425 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5426 pipe_name(pipe));
01527b31
CT
5427 }
5428
4f9db5b5 5429 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5430 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5431 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5432
5433 return true;
5434}
5435
16c25533 5436bool
f0fec3f2
PZ
5437intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5438 struct intel_connector *intel_connector)
a4fc5ed6 5439{
f0fec3f2
PZ
5440 struct drm_connector *connector = &intel_connector->base;
5441 struct intel_dp *intel_dp = &intel_dig_port->dp;
5442 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5443 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5444 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5445 enum port port = intel_dig_port->port;
0b99836f 5446 int type;
a4fc5ed6 5447
a4a5d2f8
VS
5448 intel_dp->pps_pipe = INVALID_PIPE;
5449
ec5b01dd 5450 /* intel_dp vfuncs */
b6b5e383
DL
5451 if (INTEL_INFO(dev)->gen >= 9)
5452 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5453 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5454 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5455 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5456 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5457 else if (HAS_PCH_SPLIT(dev))
5458 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5459 else
5460 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5461
b9ca5fad
DL
5462 if (INTEL_INFO(dev)->gen >= 9)
5463 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5464 else
5465 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5466
0767935e
DV
5467 /* Preserve the current hw state. */
5468 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5469 intel_dp->attached_connector = intel_connector;
3d3dc149 5470
3b32a35b 5471 if (intel_dp_is_edp(dev, port))
b329530c 5472 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5473 else
5474 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5475
f7d24902
ID
5476 /*
5477 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5478 * for DP the encoder type can be set by the caller to
5479 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5480 */
5481 if (type == DRM_MODE_CONNECTOR_eDP)
5482 intel_encoder->type = INTEL_OUTPUT_EDP;
5483
c17ed5b5
VS
5484 /* eDP only on port B and/or C on vlv/chv */
5485 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5486 port != PORT_B && port != PORT_C))
5487 return false;
5488
e7281eab
ID
5489 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5490 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5491 port_name(port));
5492
b329530c 5493 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5494 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5495
a4fc5ed6
KP
5496 connector->interlace_allowed = true;
5497 connector->doublescan_allowed = 0;
5498
f0fec3f2 5499 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5500 edp_panel_vdd_work);
a4fc5ed6 5501
df0e9248 5502 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5503 drm_connector_register(connector);
a4fc5ed6 5504
affa9354 5505 if (HAS_DDI(dev))
bcbc889b
PZ
5506 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5507 else
5508 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5509 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5510
0b99836f 5511 /* Set up the hotplug pin. */
ab9d7c30
PZ
5512 switch (port) {
5513 case PORT_A:
1d843f9d 5514 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5515 break;
5516 case PORT_B:
1d843f9d 5517 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5518 break;
5519 case PORT_C:
1d843f9d 5520 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5521 break;
5522 case PORT_D:
1d843f9d 5523 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5524 break;
5525 default:
ad1c0b19 5526 BUG();
5eb08b69
ZW
5527 }
5528
dada1a9f 5529 if (is_edp(intel_dp)) {
773538e8 5530 pps_lock(intel_dp);
1e74a324
VS
5531 intel_dp_init_panel_power_timestamps(intel_dp);
5532 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5533 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5534 else
36b5f425 5535 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5536 pps_unlock(intel_dp);
dada1a9f 5537 }
0095e6dc 5538
9d1a1031 5539 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5540
0e32b39c 5541 /* init MST on ports that can support it */
c86ea3d0 5542 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5543 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5544 intel_dp_mst_encoder_init(intel_dig_port,
5545 intel_connector->base.base.id);
0e32b39c
DA
5546 }
5547 }
5548
36b5f425 5549 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5550 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5551 if (is_edp(intel_dp)) {
5552 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5553 /*
5554		 * vdd might still be enabled due to the delayed vdd off.
5555 * Make sure vdd is actually turned off here.
5556 */
773538e8 5557 pps_lock(intel_dp);
4be73780 5558 edp_panel_vdd_off_sync(intel_dp);
773538e8 5559 pps_unlock(intel_dp);
15b1d171 5560 }
34ea3d38 5561 drm_connector_unregister(connector);
b2f246a8 5562 drm_connector_cleanup(connector);
16c25533 5563 return false;
b2f246a8 5564 }
32f9d658 5565
f684960e
CW
5566 intel_dp_add_properties(intel_dp, connector);
5567
a4fc5ed6
KP
5568 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5569 * 0xd. Failure to do so will result in spurious interrupts being
5570 * generated on the port when a cable is not attached.
5571 */
5572 if (IS_G4X(dev) && !IS_GM45(dev)) {
5573 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5574 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5575 }
16c25533
PZ
5576
5577 return true;
a4fc5ed6 5578}
f0fec3f2
PZ
5579
5580void
5581intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5582{
13cf5504 5583 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5584 struct intel_digital_port *intel_dig_port;
5585 struct intel_encoder *intel_encoder;
5586 struct drm_encoder *encoder;
5587 struct intel_connector *intel_connector;
5588
b14c5679 5589 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5590 if (!intel_dig_port)
5591 return;
5592
b14c5679 5593 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5594 if (!intel_connector) {
5595 kfree(intel_dig_port);
5596 return;
5597 }
5598
5599 intel_encoder = &intel_dig_port->base;
5600 encoder = &intel_encoder->base;
5601
5602 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5603 DRM_MODE_ENCODER_TMDS);
5604
5bfe2ac0 5605 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5606 intel_encoder->disable = intel_disable_dp;
00c09d70 5607 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5608 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5609 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5610 if (IS_CHERRYVIEW(dev)) {
9197c88b 5611 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5612 intel_encoder->pre_enable = chv_pre_enable_dp;
5613 intel_encoder->enable = vlv_enable_dp;
580d3811 5614 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5615 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5616 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5617 intel_encoder->pre_enable = vlv_pre_enable_dp;
5618 intel_encoder->enable = vlv_enable_dp;
49277c31 5619 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5620 } else {
ecff4f3b
JN
5621 intel_encoder->pre_enable = g4x_pre_enable_dp;
5622 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5623 if (INTEL_INFO(dev)->gen >= 5)
5624 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5625 }
f0fec3f2 5626
174edf1f 5627 intel_dig_port->port = port;
f0fec3f2
PZ
5628 intel_dig_port->dp.output_reg = output_reg;
5629
00c09d70 5630 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5631 if (IS_CHERRYVIEW(dev)) {
5632 if (port == PORT_D)
5633 intel_encoder->crtc_mask = 1 << 2;
5634 else
5635 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5636 } else {
5637 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5638 }
bc079e8b 5639 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5640 intel_encoder->hot_plug = intel_dp_hot_plug;
5641
13cf5504
DA
5642 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5643 dev_priv->hpd_irq_port[port] = intel_dig_port;
5644
15b1d171
PZ
5645 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5646 drm_encoder_cleanup(encoder);
5647 kfree(intel_dig_port);
b2f246a8 5648 kfree(intel_connector);
15b1d171 5649 }
f0fec3f2 5650}
0e32b39c
DA
5651
5652void intel_dp_mst_suspend(struct drm_device *dev)
5653{
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5655 int i;
5656
5657 /* disable MST */
5658 for (i = 0; i < I915_MAX_PORTS; i++) {
5659 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5660 if (!intel_dig_port)
5661 continue;
5662
5663 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5664 if (!intel_dig_port->dp.can_mst)
5665 continue;
5666 if (intel_dig_port->dp.is_mst)
5667 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5668 }
5669 }
5670}
5671
5672void intel_dp_mst_resume(struct drm_device *dev)
5673{
5674 struct drm_i915_private *dev_priv = dev->dev_private;
5675 int i;
5676
5677 for (i = 0; i < I915_MAX_PORTS; i++) {
5678 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5679 if (!intel_dig_port)
5680 continue;
5681 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5682 int ret;
5683
5684 if (!intel_dig_port->dp.can_mst)
5685 continue;
5686
5687 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5688 if (ret != 0) {
5689 intel_dp_check_mst_status(&intel_dig_port->dp);
5690 }
5691 }
5692 }
5693}