drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf 50struct dp_link_dpll {
840b32b7 51 int clock;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
840b32b7 56 { 162000,
9dd4ffdf 57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
840b32b7 63 { 162000,
9dd4ffdf 64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5 69static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 70 { 162000,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
76/*
77 * CHV supports eDP 1.4, which allows additional link rates.
78 * Only the fixed rates are listed below; the variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires the fractional divider to be programmed for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
840b32b7 86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 88 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
840b32b7 90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63 93
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15 97 324000, 432000, 540000 };
98static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
f4896f15 101static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 102
103/**
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
106 *
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
109 */
110static bool is_edp(struct intel_dp *intel_dp)
111{
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
115}
116
68b4d824 117static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 118{
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121 return intel_dig_port->base.base.dev;
122}
123
124static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125{
fa90ecef 126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
127}
128
ea5b213a 129static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 130static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 131static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 132static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133static void vlv_steal_power_sequencer(struct drm_device *dev,
134 enum pipe pipe);
a4fc5ed6 135
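/* Bitmask of the lanes (out of the 4 possible) left unused by a link of the given width. */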
136static unsigned int intel_dp_unused_lane_mask(int lane_count)
137{
138 return ~((1 << lane_count) - 1) & 0xf;
139}
140
141static int
142intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 143{
7183dc29 144 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145
146 switch (max_link_bw) {
147 case DP_LINK_BW_1_62:
148 case DP_LINK_BW_2_7:
1db10e28 149 case DP_LINK_BW_5_4:
d4eead50 150 break;
a4fc5ed6 151 default:
152 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
153 max_link_bw);
154 max_link_bw = DP_LINK_BW_1_62;
155 break;
156 }
157 return max_link_bw;
158}
159
160static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
161{
162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = intel_dig_port->base.base.dev;
164 u8 source_max, sink_max;
165
166 source_max = 4;
167 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
168 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
169 source_max = 2;
170
171 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
172
173 return min(source_max, sink_max);
174}
175
176/*
177 * The units on the numbers in the next two are... bizarre. Examples will
178 * make it clearer; this one parallels an example in the eDP spec.
179 *
180 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181 *
182 * 270000 * 1 * 8 / 10 == 216000
183 *
184 * The actual data capacity of that configuration is 2.16Gbit/s, so the
185 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
186 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187 * 119000. At 18bpp that's 2142000 kilobits per second.
188 *
189 * Thus the strange-looking division by 10 in intel_dp_link_required, to
190 * get the result in decakilobits instead of kilobits.
191 */
192
a4fc5ed6 193static int
c898261c 194intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 195{
cd9dde44 196 return (pixel_clock * bpp + 9) / 10;
197}
198
199static int
200intel_dp_max_data_rate(int max_link_clock, int max_lanes)
201{
202 return (max_link_clock * max_lanes * 8) / 10;
203}
204
c19de8eb 205static enum drm_mode_status
206intel_dp_mode_valid(struct drm_connector *connector,
207 struct drm_display_mode *mode)
208{
df0e9248 209 struct intel_dp *intel_dp = intel_attached_dp(connector);
210 struct intel_connector *intel_connector = to_intel_connector(connector);
211 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
212 int target_clock = mode->clock;
213 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 214
215 if (is_edp(intel_dp) && fixed_mode) {
216 if (mode->hdisplay > fixed_mode->hdisplay)
217 return MODE_PANEL;
218
dd06f90e 219 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 220 return MODE_PANEL;
221
222 target_clock = fixed_mode->clock;
223 }
224
50fec21a 225 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 226 max_lanes = intel_dp_max_lane_count(intel_dp);
227
228 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
229 mode_rate = intel_dp_link_required(target_clock, 18);
230
231 if (mode_rate > max_rate)
c4867936 232 return MODE_CLOCK_HIGH;
233
234 if (mode->clock < 10000)
235 return MODE_CLOCK_LOW;
236
237 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
238 return MODE_H_ILLEGAL;
239
240 return MODE_OK;
241}
242
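/* Pack up to four bytes, MSB first, into one 32-bit word for the AUX data registers. */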
a4f1289e 243uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
244{
245 int i;
246 uint32_t v = 0;
247
248 if (src_bytes > 4)
249 src_bytes = 4;
250 for (i = 0; i < src_bytes; i++)
251 v |= ((uint32_t) src[i]) << ((3-i) * 8);
252 return v;
253}
254
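/* Unpack a 32-bit AUX data register value back into up to four bytes, MSB first. */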
c2af70e2 255static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
256{
257 int i;
258 if (dst_bytes > 4)
259 dst_bytes = 4;
260 for (i = 0; i < dst_bytes; i++)
261 dst[i] = src >> ((3-i) * 8);
262}
263
264/* hrawclock is 1/4 the FSB frequency */
265static int
266intel_hrawclk(struct drm_device *dev)
267{
268 struct drm_i915_private *dev_priv = dev->dev_private;
269 uint32_t clkcfg;
270
271 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
272 if (IS_VALLEYVIEW(dev))
273 return 200;
274
275 clkcfg = I915_READ(CLKCFG);
276 switch (clkcfg & CLKCFG_FSB_MASK) {
277 case CLKCFG_FSB_400:
278 return 100;
279 case CLKCFG_FSB_533:
280 return 133;
281 case CLKCFG_FSB_667:
282 return 166;
283 case CLKCFG_FSB_800:
284 return 200;
285 case CLKCFG_FSB_1067:
286 return 266;
287 case CLKCFG_FSB_1333:
288 return 333;
289 /* these two are just a guess; one of them might be right */
290 case CLKCFG_FSB_1600:
291 case CLKCFG_FSB_1600_ALT:
292 return 400;
293 default:
294 return 133;
295 }
296}
297
298static void
299intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 300 struct intel_dp *intel_dp);
301static void
302intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 303 struct intel_dp *intel_dp);
bf13e81b 304
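/*
 * Grab pps_mutex together with the display power domain reference that the
 * PPS registers require; pps_unlock() drops both again.
 */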
305static void pps_lock(struct intel_dp *intel_dp)
306{
307 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
308 struct intel_encoder *encoder = &intel_dig_port->base;
309 struct drm_device *dev = encoder->base.dev;
310 struct drm_i915_private *dev_priv = dev->dev_private;
311 enum intel_display_power_domain power_domain;
312
313 /*
314 * See vlv_power_sequencer_reset() why we need
315 * a power domain reference here.
316 */
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_get(dev_priv, power_domain);
319
320 mutex_lock(&dev_priv->pps_mutex);
321}
322
323static void pps_unlock(struct intel_dp *intel_dp)
324{
325 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
326 struct intel_encoder *encoder = &intel_dig_port->base;
327 struct drm_device *dev = encoder->base.dev;
328 struct drm_i915_private *dev_priv = dev->dev_private;
329 enum intel_display_power_domain power_domain;
330
331 mutex_unlock(&dev_priv->pps_mutex);
332
333 power_domain = intel_display_port_power_domain(encoder);
334 intel_display_power_put(dev_priv, power_domain);
335}
336
337static void
338vlv_power_sequencer_kick(struct intel_dp *intel_dp)
339{
340 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
341 struct drm_device *dev = intel_dig_port->base.base.dev;
342 struct drm_i915_private *dev_priv = dev->dev_private;
343 enum pipe pipe = intel_dp->pps_pipe;
344 bool pll_enabled, release_cl_override = false;
345 enum dpio_phy phy = DPIO_PHY(pipe);
346 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
347 uint32_t DP;
348
349 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
 350		 "skipping pipe %c power sequencer kick due to port %c being active\n",
351 pipe_name(pipe), port_name(intel_dig_port->port)))
352 return;
353
354 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
355 pipe_name(pipe), port_name(intel_dig_port->port));
356
357 /* Preserve the BIOS-computed detected bit. This is
358 * supposed to be read-only.
359 */
360 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
361 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
362 DP |= DP_PORT_WIDTH(1);
363 DP |= DP_LINK_TRAIN_PAT_1;
364
365 if (IS_CHERRYVIEW(dev))
366 DP |= DP_PIPE_SELECT_CHV(pipe);
367 else if (pipe == PIPE_B)
368 DP |= DP_PIPEB_SELECT;
369
370 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
371
372 /*
373 * The DPLL for the pipe must be enabled for this to work.
 374	 * So enable it temporarily if it's not already enabled.
375 */
376 if (!pll_enabled) {
377 release_cl_override = IS_CHERRYVIEW(dev) &&
378 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
379
380 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
381 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
0047eedc 382 }
d288f65f 383
384 /*
385 * Similar magic as in intel_dp_enable_port().
386 * We _must_ do this port enable + disable trick
 387	 * to make this power sequencer lock onto the port.
 388	 * Otherwise even the VDD force bit won't work.
389 */
390 I915_WRITE(intel_dp->output_reg, DP);
391 POSTING_READ(intel_dp->output_reg);
392
393 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
394 POSTING_READ(intel_dp->output_reg);
395
396 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
397 POSTING_READ(intel_dp->output_reg);
d288f65f 398
0047eedc 399 if (!pll_enabled) {
d288f65f 400 vlv_force_pll_off(dev, pipe);
401
402 if (release_cl_override)
403 chv_phy_powergate_ch(dev_priv, phy, ch, false);
404 }
405}
406
407static enum pipe
408vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
409{
410 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
411 struct drm_device *dev = intel_dig_port->base.base.dev;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_encoder *encoder;
414 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 415 enum pipe pipe;
bf13e81b 416
e39b999a 417 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 418
419 /* We should never land here with regular DP ports */
420 WARN_ON(!is_edp(intel_dp));
421
422 if (intel_dp->pps_pipe != INVALID_PIPE)
423 return intel_dp->pps_pipe;
424
425 /*
426 * We don't have power sequencer currently.
427 * Pick one that's not used by other ports.
428 */
429 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
430 base.head) {
431 struct intel_dp *tmp;
432
433 if (encoder->type != INTEL_OUTPUT_EDP)
434 continue;
435
436 tmp = enc_to_intel_dp(&encoder->base);
437
438 if (tmp->pps_pipe != INVALID_PIPE)
439 pipes &= ~(1 << tmp->pps_pipe);
440 }
441
442 /*
443 * Didn't find one. This should not happen since there
444 * are two power sequencers and up to two eDP ports.
445 */
446 if (WARN_ON(pipes == 0))
447 pipe = PIPE_A;
448 else
449 pipe = ffs(pipes) - 1;
a4a5d2f8 450
451 vlv_steal_power_sequencer(dev, pipe);
452 intel_dp->pps_pipe = pipe;
453
454 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
455 pipe_name(intel_dp->pps_pipe),
456 port_name(intel_dig_port->port));
457
458 /* init power sequencer on this pipe and port */
459 intel_dp_init_panel_power_sequencer(dev, intel_dp);
460 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 461
462 /*
 463	 * Even VDD force doesn't work until we've made
 464	 * the power sequencer lock onto the port.
465 */
466 vlv_power_sequencer_kick(intel_dp);
467
468 return intel_dp->pps_pipe;
469}
470
471typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
472 enum pipe pipe);
473
474static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
475 enum pipe pipe)
476{
477 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
478}
479
480static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
481 enum pipe pipe)
482{
483 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
484}
485
486static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
487 enum pipe pipe)
488{
489 return true;
490}
bf13e81b 491
a4a5d2f8 492static enum pipe
493vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
494 enum port port,
495 vlv_pipe_check pipe_check)
496{
497 enum pipe pipe;
bf13e81b 498
499 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
500 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
501 PANEL_PORT_SELECT_MASK;
502
503 if (port_sel != PANEL_PORT_SELECT_VLV(port))
504 continue;
505
506 if (!pipe_check(dev_priv, pipe))
507 continue;
508
a4a5d2f8 509 return pipe;
510 }
511
512 return INVALID_PIPE;
513}
514
515static void
516vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
517{
518 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
519 struct drm_device *dev = intel_dig_port->base.base.dev;
520 struct drm_i915_private *dev_priv = dev->dev_private;
521 enum port port = intel_dig_port->port;
522
523 lockdep_assert_held(&dev_priv->pps_mutex);
524
525 /* try to find a pipe with this port selected */
526 /* first pick one where the panel is on */
527 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
528 vlv_pipe_has_pp_on);
529 /* didn't find one? pick one where vdd is on */
530 if (intel_dp->pps_pipe == INVALID_PIPE)
531 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
532 vlv_pipe_has_vdd_on);
533 /* didn't find one? pick one with just the correct port */
534 if (intel_dp->pps_pipe == INVALID_PIPE)
535 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
536 vlv_pipe_any);
537
538 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
539 if (intel_dp->pps_pipe == INVALID_PIPE) {
540 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
541 port_name(port));
542 return;
543 }
544
545 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
546 port_name(port), pipe_name(intel_dp->pps_pipe));
547
548 intel_dp_init_panel_power_sequencer(dev, intel_dp);
549 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
550}
551
552void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
553{
554 struct drm_device *dev = dev_priv->dev;
555 struct intel_encoder *encoder;
556
557 if (WARN_ON(!IS_VALLEYVIEW(dev)))
558 return;
559
560 /*
561 * We can't grab pps_mutex here due to deadlock with power_domain
562 * mutex when power_domain functions are called while holding pps_mutex.
563 * That also means that in order to use pps_pipe the code needs to
564 * hold both a power domain reference and pps_mutex, and the power domain
565 * reference get/put must be done while _not_ holding pps_mutex.
566 * pps_{lock,unlock}() do these steps in the correct order, so one
567 * should use them always.
568 */
569
570 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
571 struct intel_dp *intel_dp;
572
573 if (encoder->type != INTEL_OUTPUT_EDP)
574 continue;
575
576 intel_dp = enc_to_intel_dp(&encoder->base);
577 intel_dp->pps_pipe = INVALID_PIPE;
578 }
579}
580
581static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
582{
583 struct drm_device *dev = intel_dp_to_dev(intel_dp);
584
585 if (IS_BROXTON(dev))
586 return BXT_PP_CONTROL(0);
587 else if (HAS_PCH_SPLIT(dev))
588 return PCH_PP_CONTROL;
589 else
590 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
591}
592
593static u32 _pp_stat_reg(struct intel_dp *intel_dp)
594{
595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
596
597 if (IS_BROXTON(dev))
598 return BXT_PP_STATUS(0);
599 else if (HAS_PCH_SPLIT(dev))
600 return PCH_PP_STATUS;
601 else
602 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
603}
604
 605/* Reboot notifier handler to shut down panel power so that the T12 timing
 606   is guaranteed. Only applicable when the panel PM state is not being tracked. */
607static int edp_notify_handler(struct notifier_block *this, unsigned long code,
608 void *unused)
609{
610 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
611 edp_notifier);
612 struct drm_device *dev = intel_dp_to_dev(intel_dp);
613 struct drm_i915_private *dev_priv = dev->dev_private;
614 u32 pp_div;
615 u32 pp_ctrl_reg, pp_div_reg;
616
617 if (!is_edp(intel_dp) || code != SYS_RESTART)
618 return 0;
619
773538e8 620 pps_lock(intel_dp);
e39b999a 621
01527b31 622 if (IS_VALLEYVIEW(dev)) {
623 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
624
625 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
626 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
627 pp_div = I915_READ(pp_div_reg);
628 pp_div &= PP_REFERENCE_DIVIDER_MASK;
629
630 /* 0x1F write to PP_DIV_REG sets max cycle delay */
631 I915_WRITE(pp_div_reg, pp_div | 0x1F);
632 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
633 msleep(intel_dp->panel_power_cycle_delay);
634 }
635
773538e8 636 pps_unlock(intel_dp);
e39b999a 637
638 return 0;
639}
640
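/* Report whether the panel power sequencer currently has panel power (PP_ON) asserted. */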
4be73780 641static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 642{
30add22d 643 struct drm_device *dev = intel_dp_to_dev(intel_dp);
644 struct drm_i915_private *dev_priv = dev->dev_private;
645
646 lockdep_assert_held(&dev_priv->pps_mutex);
647
648 if (IS_VALLEYVIEW(dev) &&
649 intel_dp->pps_pipe == INVALID_PIPE)
650 return false;
651
bf13e81b 652 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
653}
654
4be73780 655static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 656{
30add22d 657 struct drm_device *dev = intel_dp_to_dev(intel_dp);
658 struct drm_i915_private *dev_priv = dev->dev_private;
659
660 lockdep_assert_held(&dev_priv->pps_mutex);
661
662 if (IS_VALLEYVIEW(dev) &&
663 intel_dp->pps_pipe == INVALID_PIPE)
664 return false;
665
773538e8 666 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
667}
668
669static void
670intel_dp_check_edp(struct intel_dp *intel_dp)
671{
30add22d 672 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 673 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 674
675 if (!is_edp(intel_dp))
676 return;
453c5420 677
4be73780 678 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
679 WARN(1, "eDP powered off while attempting aux channel communication.\n");
680 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
681 I915_READ(_pp_stat_reg(intel_dp)),
682 I915_READ(_pp_ctrl_reg(intel_dp)));
683 }
684}
685
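/*
 * Wait for the pending AUX transfer to complete, either via the AUX done
 * interrupt or by polling SEND_BUSY, and return the final channel status.
 */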
686static uint32_t
687intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
688{
689 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690 struct drm_device *dev = intel_dig_port->base.base.dev;
691 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 692 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
693 uint32_t status;
694 bool done;
695
ef04f00d 696#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 697 if (has_aux_irq)
b18ac466 698 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 699 msecs_to_jiffies_timeout(10));
700 else
701 done = wait_for_atomic(C, 10) == 0;
702 if (!done)
703 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
704 has_aux_irq);
705#undef C
706
707 return status;
708}
709
ec5b01dd 710static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 711{
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 714
715 /*
 716	 * The clock divider is based on the hrawclk and should run at
 717	 * 2 MHz. So take the hrawclk value, divide it by 2, and use that.
a4fc5ed6 718 */
719 return index ? 0 : intel_hrawclk(dev) / 2;
720}
721
722static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
723{
724 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
725 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 726 struct drm_i915_private *dev_priv = dev->dev_private;
727
728 if (index)
729 return 0;
730
731 if (intel_dig_port->port == PORT_A) {
732 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
733
734 } else {
735 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
736 }
737}
738
739static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
740{
741 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
742 struct drm_device *dev = intel_dig_port->base.base.dev;
743 struct drm_i915_private *dev_priv = dev->dev_private;
744
745 if (intel_dig_port->port == PORT_A) {
746 if (index)
747 return 0;
05024da3 748 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
749 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
750 /* Workaround for non-ULT HSW */
751 switch (index) {
752 case 0: return 63;
753 case 1: return 72;
754 default: return 0;
755 }
ec5b01dd 756 } else {
bc86625a 757 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 758 }
759}
760
761static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
762{
763 return index ? 0 : 100;
764}
765
766static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
767{
768 /*
769 * SKL doesn't need us to program the AUX clock divider (Hardware will
770 * derive the clock from CDCLK automatically). We still implement the
771 * get_aux_clock_divider vfunc to plug-in into the existing code.
772 */
773 return index ? 0 : 1;
774}
775
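/*
 * Assemble the AUX_CH_CTL value for a transfer: busy/done/interrupt bits,
 * timeout, message size, precharge time and the 2x bit-clock divider.
 */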
776static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
777 bool has_aux_irq,
778 int send_bytes,
779 uint32_t aux_clock_divider)
780{
781 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
782 struct drm_device *dev = intel_dig_port->base.base.dev;
783 uint32_t precharge, timeout;
784
785 if (IS_GEN6(dev))
786 precharge = 3;
787 else
788 precharge = 5;
789
790 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
791 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
792 else
793 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
794
795 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 796 DP_AUX_CH_CTL_DONE |
5ed12a19 797 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 798 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 799 timeout |
788d4433 800 DP_AUX_CH_CTL_RECEIVE_ERROR |
801 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
802 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 803 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
804}
805
806static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
807 bool has_aux_irq,
808 int send_bytes,
809 uint32_t unused)
810{
811 return DP_AUX_CH_CTL_SEND_BUSY |
812 DP_AUX_CH_CTL_DONE |
813 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
814 DP_AUX_CH_CTL_TIME_OUT_ERROR |
815 DP_AUX_CH_CTL_TIME_OUT_1600us |
816 DP_AUX_CH_CTL_RECEIVE_ERROR |
817 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
818 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
819}
820
821static int
822intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 823 const uint8_t *send, int send_bytes,
824 uint8_t *recv, int recv_size)
825{
826 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
827 struct drm_device *dev = intel_dig_port->base.base.dev;
828 struct drm_i915_private *dev_priv = dev->dev_private;
829 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
830 uint32_t ch_data = ch_ctl + 4;
bc86625a 831 uint32_t aux_clock_divider;
832 int i, ret, recv_bytes;
833 uint32_t status;
5ed12a19 834 int try, clock = 0;
4e6b788c 835 bool has_aux_irq = HAS_AUX_IRQ(dev);
836 bool vdd;
837
773538e8 838 pps_lock(intel_dp);
e39b999a 839
840 /*
841 * We will be called with VDD already enabled for dpcd/edid/oui reads.
842 * In such cases we want to leave VDD enabled and it's up to upper layers
843 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
844 * ourselves.
845 */
1e0560e0 846 vdd = edp_panel_vdd_on(intel_dp);
847
848 /* dp aux is extremely sensitive to irq latency, hence request the
849 * lowest possible wakeup latency and so prevent the cpu from going into
850 * deep sleep states.
851 */
852 pm_qos_update_request(&dev_priv->pm_qos, 0);
853
854 intel_dp_check_edp(intel_dp);
5eb08b69 855
856 intel_aux_display_runtime_get(dev_priv);
857
858 /* Try to wait for any previous AUX channel activity */
859 for (try = 0; try < 3; try++) {
ef04f00d 860 status = I915_READ_NOTRACE(ch_ctl);
861 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
862 break;
863 msleep(1);
864 }
865
866 if (try == 3) {
867 static u32 last_status = -1;
868 const u32 status = I915_READ(ch_ctl);
869
870 if (status != last_status) {
871 WARN(1, "dp_aux_ch not started status 0x%08x\n",
872 status);
873 last_status = status;
874 }
875
876 ret = -EBUSY;
877 goto out;
878 }
879
880 /* Only 5 data registers! */
881 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
882 ret = -E2BIG;
883 goto out;
884 }
885
ec5b01dd 886 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
887 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
888 has_aux_irq,
889 send_bytes,
890 aux_clock_divider);
5ed12a19 891
892 /* Must try at least 3 times according to DP spec */
893 for (try = 0; try < 5; try++) {
894 /* Load the send data into the aux channel data registers */
895 for (i = 0; i < send_bytes; i += 4)
896 I915_WRITE(ch_data + i,
897 intel_dp_pack_aux(send + i,
898 send_bytes - i));
899
900 /* Send the command and wait for it to complete */
5ed12a19 901 I915_WRITE(ch_ctl, send_ctl);
902
903 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
904
905 /* Clear done status and any errors */
906 I915_WRITE(ch_ctl,
907 status |
908 DP_AUX_CH_CTL_DONE |
909 DP_AUX_CH_CTL_TIME_OUT_ERROR |
910 DP_AUX_CH_CTL_RECEIVE_ERROR);
911
74ebf294 912 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 913 continue;
914
915 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
916 * 400us delay required for errors and timeouts
917 * Timeout errors from the HW already meet this
918 * requirement so skip to next iteration
919 */
920 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
921 usleep_range(400, 500);
bc86625a 922 continue;
74ebf294 923 }
bc86625a 924 if (status & DP_AUX_CH_CTL_DONE)
e058c945 925 goto done;
bc86625a 926 }
927 }
928
a4fc5ed6 929 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 930 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
931 ret = -EBUSY;
932 goto out;
933 }
934
e058c945 935done:
936 /* Check for timeout or receive error.
937 * Timeouts occur when the sink is not connected
938 */
a5b3da54 939 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 940 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
941 ret = -EIO;
942 goto out;
a5b3da54 943 }
944
945 /* Timeouts occur when the device isn't connected, so they're
946 * "normal" -- don't fill the kernel log with these */
a5b3da54 947 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 948 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
949 ret = -ETIMEDOUT;
950 goto out;
951 }
952
953 /* Unload any bytes sent back from the other side */
954 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
955 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
956 if (recv_bytes > recv_size)
957 recv_bytes = recv_size;
0206e353 958
4f7f7b7e 959 for (i = 0; i < recv_bytes; i += 4)
960 intel_dp_unpack_aux(I915_READ(ch_data + i),
961 recv + i, recv_bytes - i);
a4fc5ed6 962
963 ret = recv_bytes;
964out:
965 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 966 intel_aux_display_runtime_put(dev_priv);
9ee32fea 967
968 if (vdd)
969 edp_panel_vdd_off(intel_dp, false);
970
773538e8 971 pps_unlock(intel_dp);
e39b999a 972
9ee32fea 973 return ret;
974}
975
976#define BARE_ADDRESS_SIZE 3
977#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
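/*
 * drm_dp_aux .transfer() hook: build the 3-4 byte AUX header from the
 * request, send it (plus any payload) via intel_dp_aux_ch() and unpack
 * the reply into the message buffer.
 */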
978static ssize_t
979intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 980{
981 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
982 uint8_t txbuf[20], rxbuf[20];
983 size_t txsize, rxsize;
a4fc5ed6 984 int ret;
a4fc5ed6 985
986 txbuf[0] = (msg->request << 4) |
987 ((msg->address >> 16) & 0xf);
988 txbuf[1] = (msg->address >> 8) & 0xff;
989 txbuf[2] = msg->address & 0xff;
990 txbuf[3] = msg->size - 1;
46a5ae9f 991
992 switch (msg->request & ~DP_AUX_I2C_MOT) {
993 case DP_AUX_NATIVE_WRITE:
994 case DP_AUX_I2C_WRITE:
a6c8aff0 995 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 996 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 997
998 if (WARN_ON(txsize > 20))
999 return -E2BIG;
a4fc5ed6 1000
9d1a1031 1001 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 1002
1003 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1004 if (ret > 0) {
1005 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 1006
1007 if (ret > 1) {
1008 /* Number of bytes written in a short write. */
1009 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1010 } else {
1011 /* Return payload size. */
1012 ret = msg->size;
1013 }
1014 }
1015 break;
46a5ae9f 1016
1017 case DP_AUX_NATIVE_READ:
1018 case DP_AUX_I2C_READ:
a6c8aff0 1019 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 1020 rxsize = msg->size + 1;
a4fc5ed6 1021
1022 if (WARN_ON(rxsize > 20))
1023 return -E2BIG;
a4fc5ed6 1024
1025 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1026 if (ret > 0) {
1027 msg->reply = rxbuf[0] >> 4;
1028 /*
1029 * Assume happy day, and copy the data. The caller is
1030 * expected to check msg->reply before touching it.
1031 *
1032 * Return payload size.
1033 */
1034 ret--;
1035 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1036 }
1037 break;
1038
1039 default:
1040 ret = -EINVAL;
1041 break;
a4fc5ed6 1042 }
f51a44b9 1043
9d1a1031 1044 return ret;
1045}
1046
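/*
 * Pick the AUX channel control register for this port, register the
 * drm_dp_aux helper and link its DDC adapter into the connector's sysfs
 * directory.
 */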
1047static void
1048intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1049{
1050 struct drm_device *dev = intel_dp_to_dev(intel_dp);
500ea70d 1051 struct drm_i915_private *dev_priv = dev->dev_private;
1052 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1053 enum port port = intel_dig_port->port;
500ea70d 1054 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
0b99836f 1055 const char *name = NULL;
500ea70d 1056 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1057 int ret;
1058
1059 /* On SKL we don't have Aux for port E so we rely on VBT to set
1060 * a proper alternate aux channel.
1061 */
1062 if (IS_SKYLAKE(dev) && port == PORT_E) {
1063 switch (info->alternate_aux_channel) {
1064 case DP_AUX_B:
1065 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1066 break;
1067 case DP_AUX_C:
1068 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1069 break;
1070 case DP_AUX_D:
1071 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1072 break;
1073 case DP_AUX_A:
1074 default:
1075 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1076 }
1077 }
1078
1079 switch (port) {
1080 case PORT_A:
1081 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1082 name = "DPDDC-A";
ab2c0672 1083 break;
1084 case PORT_B:
1085 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1086 name = "DPDDC-B";
ab2c0672 1087 break;
1088 case PORT_C:
1089 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1090 name = "DPDDC-C";
ab2c0672 1091 break;
1092 case PORT_D:
1093 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1094 name = "DPDDC-D";
33ad6626 1095 break;
1096 case PORT_E:
1097 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1098 name = "DPDDC-E";
1099 break;
1100 default:
1101 BUG();
1102 }
1103
1104 /*
1105 * The AUX_CTL register is usually DP_CTL + 0x10.
1106 *
1107 * On Haswell and Broadwell though:
1108 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1109 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1110 *
1111 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1112 */
500ea70d 1113 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
33ad6626 1114 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1115
0b99836f 1116 intel_dp->aux.name = name;
1117 intel_dp->aux.dev = dev->dev;
1118 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1119
1120 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1121 connector->base.kdev->kobj.name);
8316f337 1122
4f71d0cb 1123 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1124 if (ret < 0) {
4f71d0cb 1125 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1126 name, ret);
1127 return;
ab2c0672 1128 }
8a5e6aeb 1129
1130 ret = sysfs_create_link(&connector->base.kdev->kobj,
1131 &intel_dp->aux.ddc.dev.kobj,
1132 intel_dp->aux.ddc.dev.kobj.name);
1133 if (ret < 0) {
1134 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1135 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1136 }
1137}
1138
1139static void
1140intel_dp_connector_unregister(struct intel_connector *intel_connector)
1141{
1142 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1143
1144 if (!intel_connector->mst_port)
1145 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1146 intel_dp->aux.ddc.dev.kobj.name);
1147 intel_connector_unregister(intel_connector);
1148}
1149
5416d871 1150static void
840b32b7 1151skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1152{
1153 u32 ctrl1;
1154
1155 memset(&pipe_config->dpll_hw_state, 0,
1156 sizeof(pipe_config->dpll_hw_state));
1157
1158 pipe_config->ddi_pll_sel = SKL_DPLL0;
1159 pipe_config->dpll_hw_state.cfgcr1 = 0;
1160 pipe_config->dpll_hw_state.cfgcr2 = 0;
1161
1162 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
840b32b7 1163 switch (pipe_config->port_clock / 2) {
c3346ef6 1164 case 81000:
71cd8423 1165 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1166 SKL_DPLL0);
1167 break;
c3346ef6 1168 case 135000:
71cd8423 1169 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1170 SKL_DPLL0);
1171 break;
c3346ef6 1172 case 270000:
71cd8423 1173 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1174 SKL_DPLL0);
1175 break;
c3346ef6 1176 case 162000:
71cd8423 1177 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1178 SKL_DPLL0);
1179 break;
 1180	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
 1181	   results in a CDCLK change. Need to handle the CDCLK change by
 1182	   disabling pipes and re-enabling them. */
1183 case 108000:
71cd8423 1184 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1185 SKL_DPLL0);
1186 break;
1187 case 216000:
71cd8423 1188 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1189 SKL_DPLL0);
1190 break;
1191
1192 }
1193 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1194}
1195
0e50338c 1196static void
840b32b7 1197hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1198{
1199 memset(&pipe_config->dpll_hw_state, 0,
1200 sizeof(pipe_config->dpll_hw_state));
1201
1202 switch (pipe_config->port_clock / 2) {
1203 case 81000:
1204 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1205 break;
840b32b7 1206 case 135000:
1207 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1208 break;
840b32b7 1209 case 270000:
1210 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1211 break;
1212 }
1213}
1214
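/*
 * Sink-supported link rates: the DPCD rate table if the sink provides one,
 * otherwise the default rates up to the sink's max link bandwidth.
 */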
fc0f8e25 1215static int
12f6a2e2 1216intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1217{
1218 if (intel_dp->num_sink_rates) {
1219 *sink_rates = intel_dp->sink_rates;
1220 return intel_dp->num_sink_rates;
fc0f8e25 1221 }
1222
1223 *sink_rates = default_rates;
1224
1225 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1226}
1227
a8f3ef61 1228static int
1db10e28 1229intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1230{
1231 if (IS_BROXTON(dev)) {
1232 *source_rates = bxt_rates;
1233 return ARRAY_SIZE(bxt_rates);
1234 } else if (IS_SKYLAKE(dev)) {
1235 *source_rates = skl_rates;
1236 return ARRAY_SIZE(skl_rates);
1237 } else if (IS_CHERRYVIEW(dev)) {
1238 *source_rates = chv_rates;
1239 return ARRAY_SIZE(chv_rates);
a8f3ef61 1240 }
1241
1242 *source_rates = default_rates;
1243
1244 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1245 /* WaDisableHBR2:skl */
1246 return (DP_LINK_BW_2_7 >> 3) + 1;
1247 else if (INTEL_INFO(dev)->gen >= 8 ||
1248 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1249 return (DP_LINK_BW_5_4 >> 3) + 1;
1250 else
1251 return (DP_LINK_BW_2_7 >> 3) + 1;
1252}
1253
1254static void
1255intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1256 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1257{
1258 struct drm_device *dev = encoder->base.dev;
1259 const struct dp_link_dpll *divisor = NULL;
1260 int i, count = 0;
1261
1262 if (IS_G4X(dev)) {
1263 divisor = gen4_dpll;
1264 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1265 } else if (HAS_PCH_SPLIT(dev)) {
1266 divisor = pch_dpll;
1267 count = ARRAY_SIZE(pch_dpll);
1268 } else if (IS_CHERRYVIEW(dev)) {
1269 divisor = chv_dpll;
1270 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1271 } else if (IS_VALLEYVIEW(dev)) {
1272 divisor = vlv_dpll;
1273 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1274 }
1275
1276 if (divisor && count) {
1277 for (i = 0; i < count; i++) {
840b32b7 1278 if (pipe_config->port_clock == divisor[i].clock) {
1279 pipe_config->dpll = divisor[i].dpll;
1280 pipe_config->clock_set = true;
1281 break;
1282 }
1283 }
1284 }
1285}
1286
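/*
 * Walk two ascending rate lists and copy the rates present in both into
 * common_rates; returns the number of common entries found.
 */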
1287static int intersect_rates(const int *source_rates, int source_len,
1288 const int *sink_rates, int sink_len,
94ca719e 1289 int *common_rates)
1290{
1291 int i = 0, j = 0, k = 0;
1292
1293 while (i < source_len && j < sink_len) {
1294 if (source_rates[i] == sink_rates[j]) {
1295 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1296 return k;
94ca719e 1297 common_rates[k] = source_rates[i];
1298 ++k;
1299 ++i;
1300 ++j;
1301 } else if (source_rates[i] < sink_rates[j]) {
1302 ++i;
1303 } else {
1304 ++j;
1305 }
1306 }
1307 return k;
1308}
1309
1310static int intel_dp_common_rates(struct intel_dp *intel_dp,
1311 int *common_rates)
1312{
1313 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1314 const int *source_rates, *sink_rates;
1315 int source_len, sink_len;
1316
1317 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1318 source_len = intel_dp_source_rates(dev, &source_rates);
1319
1320 return intersect_rates(source_rates, source_len,
1321 sink_rates, sink_len,
94ca719e 1322 common_rates);
1323}
1324
1325static void snprintf_int_array(char *str, size_t len,
1326 const int *array, int nelem)
1327{
1328 int i;
1329
1330 str[0] = '\0';
1331
1332 for (i = 0; i < nelem; i++) {
b2f505be 1333 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1334 if (r >= len)
1335 return;
1336 str += r;
1337 len -= r;
1338 }
1339}
1340
1341static void intel_dp_print_rates(struct intel_dp *intel_dp)
1342{
1343 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1344 const int *source_rates, *sink_rates;
1345 int source_len, sink_len, common_len;
1346 int common_rates[DP_MAX_SUPPORTED_RATES];
1347 char str[128]; /* FIXME: too big for stack? */
1348
1349 if ((drm_debug & DRM_UT_KMS) == 0)
1350 return;
1351
1352 source_len = intel_dp_source_rates(dev, &source_rates);
1353 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1354 DRM_DEBUG_KMS("source rates: %s\n", str);
1355
1356 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1357 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1358 DRM_DEBUG_KMS("sink rates: %s\n", str);
1359
1360 common_len = intel_dp_common_rates(intel_dp, common_rates);
1361 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1362 DRM_DEBUG_KMS("common rates: %s\n", str);
1363}
1364
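/* Index of @find in @rates, or DP_MAX_SUPPORTED_RATES if it is not present. */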
f4896f15 1365static int rate_to_index(int find, const int *rates)
1366{
1367 int i = 0;
1368
1369 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1370 if (find == rates[i])
1371 break;
1372
1373 return i;
1374}
1375
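/* Highest link rate supported by both the source and the sink. */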
1376int
1377intel_dp_max_link_rate(struct intel_dp *intel_dp)
1378{
1379 int rates[DP_MAX_SUPPORTED_RATES] = {};
1380 int len;
1381
94ca719e 1382 len = intel_dp_common_rates(intel_dp, rates);
1383 if (WARN_ON(len <= 0))
1384 return 162000;
1385
1386 return rates[rate_to_index(0, rates) - 1];
1387}
1388
1389int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1390{
94ca719e 1391 return rate_to_index(rate, intel_dp->sink_rates);
1392}
1393
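/*
 * Translate a port clock into the DPCD link rate programming: sinks with a
 * rate table get an index via rate_select, older sinks get a link bw code.
 */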
1394static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1395 uint8_t *link_bw, uint8_t *rate_select)
1396{
1397 if (intel_dp->num_sink_rates) {
1398 *link_bw = 0;
1399 *rate_select =
1400 intel_dp_rate_select(intel_dp, port_clock);
1401 } else {
1402 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1403 *rate_select = 0;
1404 }
1405}
1406
00c09d70 1407bool
5bfe2ac0 1408intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1409 struct intel_crtc_state *pipe_config)
a4fc5ed6 1410{
5bfe2ac0 1411 struct drm_device *dev = encoder->base.dev;
36008365 1412 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1413 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1415 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1416 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1417 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1418 int lane_count, clock;
56071a20 1419 int min_lane_count = 1;
eeb6324d 1420 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1421 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1422 int min_clock = 0;
a8f3ef61 1423 int max_clock;
083f9560 1424 int bpp, mode_rate;
ff9a6750 1425 int link_avail, link_clock;
1426 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1427 int common_len;
04a60f9f 1428 uint8_t link_bw, rate_select;
a8f3ef61 1429
94ca719e 1430 common_len = intel_dp_common_rates(intel_dp, common_rates);
1431
1432 /* No common link rates between source and sink */
94ca719e 1433 WARN_ON(common_len <= 0);
a8f3ef61 1434
94ca719e 1435 max_clock = common_len - 1;
a4fc5ed6 1436
bc7d38a4 1437 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1438 pipe_config->has_pch_encoder = true;
1439
03afc4a2 1440 pipe_config->has_dp_encoder = true;
f769cd24 1441 pipe_config->has_drrs = false;
9fcb1704 1442 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1443
1444 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1445 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1446 adjusted_mode);
1447
1448 if (INTEL_INFO(dev)->gen >= 9) {
1449 int ret;
e435d6e5 1450 ret = skl_update_scaler_crtc(pipe_config);
1451 if (ret)
1452 return ret;
1453 }
1454
1455 if (!HAS_PCH_SPLIT(dev))
1456 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1457 intel_connector->panel.fitting_mode);
1458 else
1459 intel_pch_panel_fitting(intel_crtc, pipe_config,
1460 intel_connector->panel.fitting_mode);
1461 }
1462
cb1793ce 1463 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1464 return false;
1465
083f9560 1466 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1467 "max bw %d pixel clock %iKHz\n",
94ca719e 1468 max_lane_count, common_rates[max_clock],
241bfc38 1469 adjusted_mode->crtc_clock);
083f9560 1470
1471 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1472 * bpc in between. */
3e7ca985 1473 bpp = pipe_config->pipe_bpp;
56071a20 1474 if (is_edp(intel_dp)) {
1475
 1476		/* Get bpp from the VBT only for panels that don't have bpp in the EDID */
1477 if (intel_connector->base.display_info.bpc == 0 &&
1478 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1479 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1480 dev_priv->vbt.edp_bpp);
1481 bpp = dev_priv->vbt.edp_bpp;
1482 }
1483
1484 /*
1485 * Use the maximum clock and number of lanes the eDP panel
 1486		 * advertises being capable of. The panels are generally
1487 * designed to support only a single clock and lane
1488 * configuration, and typically these values correspond to the
1489 * native resolution of the panel.
1490 */
1491 min_lane_count = max_lane_count;
1492 min_clock = max_clock;
7984211e 1493 }
657445fe 1494
36008365 1495 for (; bpp >= 6*3; bpp -= 2*3) {
1496 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1497 bpp);
36008365 1498
c6930992 1499 for (clock = min_clock; clock <= max_clock; clock++) {
1500 for (lane_count = min_lane_count;
1501 lane_count <= max_lane_count;
1502 lane_count <<= 1) {
1503
94ca719e 1504 link_clock = common_rates[clock];
1505 link_avail = intel_dp_max_data_rate(link_clock,
1506 lane_count);
1507
1508 if (mode_rate <= link_avail) {
1509 goto found;
1510 }
1511 }
1512 }
1513 }
c4867936 1514
36008365 1515 return false;
3685a8f3 1516
36008365 1517found:
1518 if (intel_dp->color_range_auto) {
1519 /*
1520 * See:
1521 * CEA-861-E - 5.1 Default Encoding Parameters
1522 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1523 */
1524 pipe_config->limited_color_range =
1525 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1526 } else {
1527 pipe_config->limited_color_range =
1528 intel_dp->limited_color_range;
1529 }
1530
90a6b7b0 1531 pipe_config->lane_count = lane_count;
a8f3ef61 1532
657445fe 1533 pipe_config->pipe_bpp = bpp;
94ca719e 1534 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1535
1536 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1537 &link_bw, &rate_select);
1538
1539 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1540 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1541 pipe_config->port_clock, bpp);
1542 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1543 mode_rate, link_avail);
a4fc5ed6 1544
03afc4a2 1545 intel_link_compute_m_n(bpp, lane_count,
1546 adjusted_mode->crtc_clock,
1547 pipe_config->port_clock,
03afc4a2 1548 &pipe_config->dp_m_n);
9d1a455b 1549
439d7ac0 1550 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1551 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1552 pipe_config->has_drrs = true;
1553 intel_link_compute_m_n(bpp, lane_count,
1554 intel_connector->panel.downclock_mode->clock,
1555 pipe_config->port_clock,
1556 &pipe_config->dp_m2_n2);
1557 }
1558
5416d871 1559 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
840b32b7 1560 skl_edp_set_pll_config(pipe_config);
1561 else if (IS_BROXTON(dev))
1562 /* handled in ddi */;
5416d871 1563 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1564 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1565 else
840b32b7 1566 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1567
03afc4a2 1568 return true;
1569}
1570
7c62a164 1571static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1572{
1573 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1574 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1575 struct drm_device *dev = crtc->base.dev;
1576 struct drm_i915_private *dev_priv = dev->dev_private;
1577 u32 dpa_ctl;
1578
1579 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1580 crtc->config->port_clock);
1581 dpa_ctl = I915_READ(DP_A);
1582 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1583
6e3c9717 1584 if (crtc->config->port_clock == 162000) {
1585 /* For a long time we've carried around a ILK-DevA w/a for the
1586 * 160MHz clock. If we're really unlucky, it's still required.
1587 */
1588 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1589 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1590 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1591 } else {
1592 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1593 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1594 }
1ce17038 1595
1596 I915_WRITE(DP_A, dpa_ctl);
1597
1598 POSTING_READ(DP_A);
1599 udelay(500);
1600}
1601
1602void intel_dp_set_link_params(struct intel_dp *intel_dp,
1603 const struct intel_crtc_state *pipe_config)
1604{
1605 intel_dp->link_rate = pipe_config->port_clock;
1606 intel_dp->lane_count = pipe_config->lane_count;
1607}
1608
8ac33ed3 1609static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1610{
b934223d 1611 struct drm_device *dev = encoder->base.dev;
417e822d 1612 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1613 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1614 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1615 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1616 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1617
1618 intel_dp_set_link_params(intel_dp, crtc->config);
1619
417e822d 1620 /*
1a2eb460 1621 * There are four kinds of DP registers:
1622 *
1623 * IBX PCH
1624 * SNB CPU
1625 * IVB CPU
1626 * CPT PCH
1627 *
1628 * IBX PCH and CPU are the same for almost everything,
1629 * except that the CPU DP PLL is configured in this
1630 * register
1631 *
1632 * CPT PCH is quite different, having many bits moved
1633 * to the TRANS_DP_CTL register instead. That
1634 * configuration happens (oddly) in ironlake_pch_enable
1635 */
9c9e7927 1636
1637 /* Preserve the BIOS-computed detected bit. This is
1638 * supposed to be read-only.
1639 */
1640 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1641
417e822d 1642 /* Handle DP bits in common between all three register formats */
417e822d 1643 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1644 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1645
6e3c9717 1646 if (crtc->config->has_audio)
ea5b213a 1647 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1648
417e822d 1649 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1650
39e5fa88 1651 if (IS_GEN7(dev) && port == PORT_A) {
1652 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1653 intel_dp->DP |= DP_SYNC_HS_HIGH;
1654 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1655 intel_dp->DP |= DP_SYNC_VS_HIGH;
1656 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1657
6aba5b6c 1658 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1659 intel_dp->DP |= DP_ENHANCED_FRAMING;
1660
7c62a164 1661 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1662 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1663 u32 trans_dp;
1664
39e5fa88 1665 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1666
1667 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1668 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1669 trans_dp |= TRANS_DP_ENH_FRAMING;
1670 else
1671 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1672 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1673 } else {
0f2a2a75
VS
1674 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1675 crtc->config->limited_color_range)
1676 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1677
1678 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1679 intel_dp->DP |= DP_SYNC_HS_HIGH;
1680 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1681 intel_dp->DP |= DP_SYNC_VS_HIGH;
1682 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1683
6aba5b6c 1684 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1685 intel_dp->DP |= DP_ENHANCED_FRAMING;
1686
39e5fa88 1687 if (IS_CHERRYVIEW(dev))
44f37d1f 1688 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1689 else if (crtc->pipe == PIPE_B)
1690 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1691 }
a4fc5ed6
KP
1692}
1693
ffd6749d
PZ
1694#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1695#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1696
1a5ef5b7
PZ
1697#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1698#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1699
ffd6749d
PZ
1700#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1701#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1702
4be73780 1703static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1704 u32 mask,
1705 u32 value)
bd943159 1706{
30add22d 1707 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1708 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1709 u32 pp_stat_reg, pp_ctrl_reg;
1710
e39b999a
VS
1711 lockdep_assert_held(&dev_priv->pps_mutex);
1712
bf13e81b
JN
1713 pp_stat_reg = _pp_stat_reg(intel_dp);
1714 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1715
99ea7127 1716 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1717 mask, value,
1718 I915_READ(pp_stat_reg),
1719 I915_READ(pp_ctrl_reg));
32ce697c 1720
453c5420 1721 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1722 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1723 I915_READ(pp_stat_reg),
1724 I915_READ(pp_ctrl_reg));
32ce697c 1725 }
54c136d4
CW
1726
1727 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1728}
32ce697c 1729
4be73780 1730static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1731{
1732 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1733 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1734}
1735
4be73780 1736static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1737{
1738 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1739 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1740}
1741
4be73780 1742static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1743{
1744 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1745
1746	/* When the VDD override bit is the last thing we disable, we have to
1747	 * do the wait manually. */
1748 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1749 intel_dp->panel_power_cycle_delay);
1750
4be73780 1751 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1752}
1753
4be73780 1754static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1755{
1756 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1757 intel_dp->backlight_on_delay);
1758}
1759
4be73780 1760static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1761{
1762 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1763 intel_dp->backlight_off_delay);
1764}
99ea7127 1765
832dd3c1
KP
1766/* Read the current pp_control value, unlocking the register if it
1767 * is locked
1768 */
1769
453c5420 1770static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1771{
453c5420
JB
1772 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1773 struct drm_i915_private *dev_priv = dev->dev_private;
1774 u32 control;
832dd3c1 1775
e39b999a
VS
1776 lockdep_assert_held(&dev_priv->pps_mutex);
1777
bf13e81b 1778 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1779 if (!IS_BROXTON(dev)) {
1780 control &= ~PANEL_UNLOCK_MASK;
1781 control |= PANEL_UNLOCK_REGS;
1782 }
832dd3c1 1783 return control;
bd943159
KP
1784}
1785
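/*
 * Editor's illustrative sketch, not part of the original source: every
 * panel power update below follows the same read-modify-write pattern
 * around ironlake_get_pp_control(). Hypothetical helper, assuming
 * pps_mutex is already held by the caller.
 */
static void example_update_blc(struct intel_dp *intel_dp, bool enable)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;
	u32 pp = ironlake_get_pp_control(intel_dp);

	if (enable)
		pp |= EDP_BLC_ENABLE;
	else
		pp &= ~EDP_BLC_ENABLE;

	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
	POSTING_READ(_pp_ctrl_reg(intel_dp));
}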
951468f3
VS
1786/*
1787 * Must be paired with edp_panel_vdd_off().
1788 * Must hold pps_mutex around the whole on/off sequence.
1789 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1790 */
1e0560e0 1791static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1792{
30add22d 1793 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1795 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1796 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1797 enum intel_display_power_domain power_domain;
5d613501 1798 u32 pp;
453c5420 1799 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1800 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1801
e39b999a
VS
1802 lockdep_assert_held(&dev_priv->pps_mutex);
1803
97af61f5 1804 if (!is_edp(intel_dp))
adddaaf4 1805 return false;
bd943159 1806
2c623c11 1807 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1808 intel_dp->want_panel_vdd = true;
99ea7127 1809
4be73780 1810 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1811 return need_to_disable;
b0665d57 1812
4e6e1a54
ID
1813 power_domain = intel_display_port_power_domain(intel_encoder);
1814 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1815
3936fcf4
VS
1816 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1817 port_name(intel_dig_port->port));
bd943159 1818
4be73780
DV
1819 if (!edp_have_panel_power(intel_dp))
1820 wait_panel_power_cycle(intel_dp);
99ea7127 1821
453c5420 1822 pp = ironlake_get_pp_control(intel_dp);
5d613501 1823 pp |= EDP_FORCE_VDD;
ebf33b18 1824
bf13e81b
JN
1825 pp_stat_reg = _pp_stat_reg(intel_dp);
1826 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1827
1828 I915_WRITE(pp_ctrl_reg, pp);
1829 POSTING_READ(pp_ctrl_reg);
1830 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1831 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1832 /*
1833 * If the panel wasn't on, delay before accessing aux channel
1834 */
4be73780 1835 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1836 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1837 port_name(intel_dig_port->port));
f01eca2e 1838 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1839 }
adddaaf4
JN
1840
1841 return need_to_disable;
1842}
1843
951468f3
VS
1844/*
1845 * Must be paired with intel_edp_panel_vdd_off() or
1846 * intel_edp_panel_off().
1847 * Nested calls to these functions are not allowed since
1848 * we drop the lock. Caller must use some higher level
1849 * locking to prevent nested calls from other threads.
1850 */
b80d6c78 1851void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1852{
c695b6b6 1853 bool vdd;
adddaaf4 1854
c695b6b6
VS
1855 if (!is_edp(intel_dp))
1856 return;
1857
773538e8 1858 pps_lock(intel_dp);
c695b6b6 1859 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1860 pps_unlock(intel_dp);
c695b6b6 1861
e2c719b7 1862 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1863 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1864}
1865
4be73780 1866static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1867{
30add22d 1868 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1869 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1870 struct intel_digital_port *intel_dig_port =
1871 dp_to_dig_port(intel_dp);
1872 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1873 enum intel_display_power_domain power_domain;
5d613501 1874 u32 pp;
453c5420 1875 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1876
e39b999a 1877 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1878
15e899a0 1879 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1880
15e899a0 1881 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1882 return;
b0665d57 1883
3936fcf4
VS
1884 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1885 port_name(intel_dig_port->port));
bd943159 1886
be2c9196
VS
1887 pp = ironlake_get_pp_control(intel_dp);
1888 pp &= ~EDP_FORCE_VDD;
453c5420 1889
be2c9196
VS
1890 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1891 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1892
be2c9196
VS
1893 I915_WRITE(pp_ctrl_reg, pp);
1894 POSTING_READ(pp_ctrl_reg);
90791a5c 1895
be2c9196
VS
1896 /* Make sure sequencer is idle before allowing subsequent activity */
1897 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1898 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1899
be2c9196
VS
1900 if ((pp & POWER_TARGET_ON) == 0)
1901 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1902
be2c9196
VS
1903 power_domain = intel_display_port_power_domain(intel_encoder);
1904 intel_display_power_put(dev_priv, power_domain);
bd943159 1905}
5d613501 1906
4be73780 1907static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1908{
1909 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1910 struct intel_dp, panel_vdd_work);
bd943159 1911
773538e8 1912 pps_lock(intel_dp);
15e899a0
VS
1913 if (!intel_dp->want_panel_vdd)
1914 edp_panel_vdd_off_sync(intel_dp);
773538e8 1915 pps_unlock(intel_dp);
bd943159
KP
1916}
1917
aba86890
ID
1918static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1919{
1920 unsigned long delay;
1921
1922 /*
1923 * Queue the timer to fire a long time from now (relative to the power
1924 * down delay) to keep the panel power up across a sequence of
1925 * operations.
1926 */
1927 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1928 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1929}
1930
951468f3
VS
1931/*
1932 * Must be paired with edp_panel_vdd_on().
1933 * Must hold pps_mutex around the whole on/off sequence.
1934 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1935 */
4be73780 1936static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1937{
e39b999a
VS
1938 struct drm_i915_private *dev_priv =
1939 intel_dp_to_dev(intel_dp)->dev_private;
1940
1941 lockdep_assert_held(&dev_priv->pps_mutex);
1942
97af61f5
KP
1943 if (!is_edp(intel_dp))
1944 return;
5d613501 1945
e2c719b7 1946 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1947 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1948
bd943159
KP
1949 intel_dp->want_panel_vdd = false;
1950
aba86890 1951 if (sync)
4be73780 1952 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1953 else
1954 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1955}
1956
9f0fb5be 1957static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1958{
30add22d 1959 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1960 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1961 u32 pp;
453c5420 1962 u32 pp_ctrl_reg;
9934c132 1963
9f0fb5be
VS
1964 lockdep_assert_held(&dev_priv->pps_mutex);
1965
97af61f5 1966 if (!is_edp(intel_dp))
bd943159 1967 return;
99ea7127 1968
3936fcf4
VS
1969 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1970 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1971
e7a89ace
VS
1972 if (WARN(edp_have_panel_power(intel_dp),
1973 "eDP port %c panel power already on\n",
1974 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1975 return;
9934c132 1976
4be73780 1977 wait_panel_power_cycle(intel_dp);
37c6c9b0 1978
bf13e81b 1979 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1980 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1981 if (IS_GEN5(dev)) {
1982 /* ILK workaround: disable reset around power sequence */
1983 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1984 I915_WRITE(pp_ctrl_reg, pp);
1985 POSTING_READ(pp_ctrl_reg);
05ce1a49 1986 }
37c6c9b0 1987
1c0ae80a 1988 pp |= POWER_TARGET_ON;
99ea7127
KP
1989 if (!IS_GEN5(dev))
1990 pp |= PANEL_POWER_RESET;
1991
453c5420
JB
1992 I915_WRITE(pp_ctrl_reg, pp);
1993 POSTING_READ(pp_ctrl_reg);
9934c132 1994
4be73780 1995 wait_panel_on(intel_dp);
dce56b3c 1996 intel_dp->last_power_on = jiffies;
9934c132 1997
05ce1a49
KP
1998 if (IS_GEN5(dev)) {
1999 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2000 I915_WRITE(pp_ctrl_reg, pp);
2001 POSTING_READ(pp_ctrl_reg);
05ce1a49 2002 }
9f0fb5be 2003}
e39b999a 2004
9f0fb5be
VS
2005void intel_edp_panel_on(struct intel_dp *intel_dp)
2006{
2007 if (!is_edp(intel_dp))
2008 return;
2009
2010 pps_lock(intel_dp);
2011 edp_panel_on(intel_dp);
773538e8 2012 pps_unlock(intel_dp);
9934c132
JB
2013}
2014
9f0fb5be
VS
2015
2016static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2017{
4e6e1a54
ID
2018 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2019 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2020 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2021 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2022 enum intel_display_power_domain power_domain;
99ea7127 2023 u32 pp;
453c5420 2024 u32 pp_ctrl_reg;
9934c132 2025
9f0fb5be
VS
2026 lockdep_assert_held(&dev_priv->pps_mutex);
2027
97af61f5
KP
2028 if (!is_edp(intel_dp))
2029 return;
37c6c9b0 2030
3936fcf4
VS
2031 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2032 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2033
3936fcf4
VS
2034 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2035 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2036
453c5420 2037 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2038	/* We need to switch off panel power _and_ force vdd, because otherwise some
2039 * panels get very unhappy and cease to work. */
b3064154
PJ
2040 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2041 EDP_BLC_ENABLE);
453c5420 2042
bf13e81b 2043 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2044
849e39f5
PZ
2045 intel_dp->want_panel_vdd = false;
2046
453c5420
JB
2047 I915_WRITE(pp_ctrl_reg, pp);
2048 POSTING_READ(pp_ctrl_reg);
9934c132 2049
dce56b3c 2050 intel_dp->last_power_cycle = jiffies;
4be73780 2051 wait_panel_off(intel_dp);
849e39f5
PZ
2052
2053 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2054 power_domain = intel_display_port_power_domain(intel_encoder);
2055 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2056}
e39b999a 2057
9f0fb5be
VS
2058void intel_edp_panel_off(struct intel_dp *intel_dp)
2059{
2060 if (!is_edp(intel_dp))
2061 return;
e39b999a 2062
9f0fb5be
VS
2063 pps_lock(intel_dp);
2064 edp_panel_off(intel_dp);
773538e8 2065 pps_unlock(intel_dp);
9934c132
JB
2066}
2067
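/*
 * Editor's illustrative sketch, not part of the original source: the
 * canonical pairing used by the disable path is to grab VDD, put the
 * sink into a low power state over AUX, and then let the panel-off
 * sequence drop the reference taken for VDD. Hypothetical helper name.
 */
static void example_power_down_edp(struct intel_dp *intel_dp)
{
	intel_edp_panel_vdd_on(intel_dp);	/* AUX access needs VDD */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);		/* drops the VDD/power reference */
}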
1250d107
JN
2068/* Enable backlight in the panel power control. */
2069static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2070{
da63a9f2
PZ
2071 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2072 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2073 struct drm_i915_private *dev_priv = dev->dev_private;
2074 u32 pp;
453c5420 2075 u32 pp_ctrl_reg;
32f9d658 2076
01cb9ea6
JB
2077 /*
2078 * If we enable the backlight right away following a panel power
2079 * on, we may see slight flicker as the panel syncs with the eDP
2080 * link. So delay a bit to make sure the image is solid before
2081 * allowing it to appear.
2082 */
4be73780 2083 wait_backlight_on(intel_dp);
e39b999a 2084
773538e8 2085 pps_lock(intel_dp);
e39b999a 2086
453c5420 2087 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2088 pp |= EDP_BLC_ENABLE;
453c5420 2089
bf13e81b 2090 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2091
2092 I915_WRITE(pp_ctrl_reg, pp);
2093 POSTING_READ(pp_ctrl_reg);
e39b999a 2094
773538e8 2095 pps_unlock(intel_dp);
32f9d658
ZW
2096}
2097
1250d107
JN
2098/* Enable backlight PWM and backlight PP control. */
2099void intel_edp_backlight_on(struct intel_dp *intel_dp)
2100{
2101 if (!is_edp(intel_dp))
2102 return;
2103
2104 DRM_DEBUG_KMS("\n");
2105
2106 intel_panel_enable_backlight(intel_dp->attached_connector);
2107 _intel_edp_backlight_on(intel_dp);
2108}
2109
2110/* Disable backlight in the panel power control. */
2111static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2112{
30add22d 2113 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2114 struct drm_i915_private *dev_priv = dev->dev_private;
2115 u32 pp;
453c5420 2116 u32 pp_ctrl_reg;
32f9d658 2117
f01eca2e
KP
2118 if (!is_edp(intel_dp))
2119 return;
2120
773538e8 2121 pps_lock(intel_dp);
e39b999a 2122
453c5420 2123 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2124 pp &= ~EDP_BLC_ENABLE;
453c5420 2125
bf13e81b 2126 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2127
2128 I915_WRITE(pp_ctrl_reg, pp);
2129 POSTING_READ(pp_ctrl_reg);
f7d2323c 2130
773538e8 2131 pps_unlock(intel_dp);
e39b999a
VS
2132
2133 intel_dp->last_backlight_off = jiffies;
f7d2323c 2134 edp_wait_backlight_off(intel_dp);
1250d107 2135}
f7d2323c 2136
1250d107
JN
2137/* Disable backlight PP control and backlight PWM. */
2138void intel_edp_backlight_off(struct intel_dp *intel_dp)
2139{
2140 if (!is_edp(intel_dp))
2141 return;
2142
2143 DRM_DEBUG_KMS("\n");
f7d2323c 2144
1250d107 2145 _intel_edp_backlight_off(intel_dp);
f7d2323c 2146 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2147}
a4fc5ed6 2148
73580fb7
JN
2149/*
2150 * Hook for controlling the panel power control backlight through the bl_power
2151 * sysfs attribute. Take care to handle multiple calls.
2152 */
2153static void intel_edp_backlight_power(struct intel_connector *connector,
2154 bool enable)
2155{
2156 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2157 bool is_enabled;
2158
773538e8 2159 pps_lock(intel_dp);
e39b999a 2160 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2161 pps_unlock(intel_dp);
73580fb7
JN
2162
2163 if (is_enabled == enable)
2164 return;
2165
23ba9373
JN
2166 DRM_DEBUG_KMS("panel power control backlight %s\n",
2167 enable ? "enable" : "disable");
73580fb7
JN
2168
2169 if (enable)
2170 _intel_edp_backlight_on(intel_dp);
2171 else
2172 _intel_edp_backlight_off(intel_dp);
2173}
2174
2bd2ad64 2175static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2176{
da63a9f2
PZ
2177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2178 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2179 struct drm_device *dev = crtc->dev;
d240f20f
JB
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 dpa_ctl;
2182
2bd2ad64
DV
2183 assert_pipe_disabled(dev_priv,
2184 to_intel_crtc(crtc)->pipe);
2185
d240f20f
JB
2186 DRM_DEBUG_KMS("\n");
2187 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2188 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2189 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2190
2191 /* We don't adjust intel_dp->DP while tearing down the link, to
2192 * facilitate link retraining (e.g. after hotplug). Hence clear all
2193 * enable bits here to ensure that we don't enable too much. */
2194 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2195 intel_dp->DP |= DP_PLL_ENABLE;
2196 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2197 POSTING_READ(DP_A);
2198 udelay(200);
d240f20f
JB
2199}
2200
2bd2ad64 2201static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2202{
da63a9f2
PZ
2203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2204 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2205 struct drm_device *dev = crtc->dev;
d240f20f
JB
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 u32 dpa_ctl;
2208
2bd2ad64
DV
2209 assert_pipe_disabled(dev_priv,
2210 to_intel_crtc(crtc)->pipe);
2211
d240f20f 2212 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2213 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2214 "dp pll off, should be on\n");
2215 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2216
2217 /* We can't rely on the value tracked for the DP register in
2218 * intel_dp->DP because link_down must not change that (otherwise link
2219	 * re-training will fail). */
298b0b39 2220 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2221 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2222 POSTING_READ(DP_A);
d240f20f
JB
2223 udelay(200);
2224}
2225
c7ad3810 2226/* If the sink supports it, try to set the power state appropriately */
c19b0669 2227void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2228{
2229 int ret, i;
2230
2231 /* Should have a valid DPCD by this point */
2232 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2233 return;
2234
2235 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2236 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2237 DP_SET_POWER_D3);
c7ad3810
JB
2238 } else {
2239 /*
2240 * When turning on, we need to retry for 1ms to give the sink
2241 * time to wake up.
2242 */
2243 for (i = 0; i < 3; i++) {
9d1a1031
JN
2244 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2245 DP_SET_POWER_D0);
c7ad3810
JB
2246 if (ret == 1)
2247 break;
2248 msleep(1);
2249 }
2250 }
f9cac721
JN
2251
2252 if (ret != 1)
2253 DRM_DEBUG_KMS("failed to %s sink power state\n",
2254 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2255}
2256
19d8fe15
DV
2257static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2258 enum pipe *pipe)
d240f20f 2259{
19d8fe15 2260 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2261 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2262 struct drm_device *dev = encoder->base.dev;
2263 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2264 enum intel_display_power_domain power_domain;
2265 u32 tmp;
2266
2267 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2268 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2269 return false;
2270
2271 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2272
2273 if (!(tmp & DP_PORT_EN))
2274 return false;
2275
39e5fa88 2276 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2277 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2278 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2279 enum pipe p;
19d8fe15 2280
adc289d7
VS
2281 for_each_pipe(dev_priv, p) {
2282 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2283 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2284 *pipe = p;
19d8fe15
DV
2285 return true;
2286 }
2287 }
19d8fe15 2288
4a0833ec
DV
2289 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2290 intel_dp->output_reg);
39e5fa88
VS
2291 } else if (IS_CHERRYVIEW(dev)) {
2292 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2293 } else {
2294 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2295 }
d240f20f 2296
19d8fe15
DV
2297 return true;
2298}
d240f20f 2299
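/*
 * Editor's illustrative sketch, not part of the original source: the
 * readout hook above is typically consumed like this when cross-checking
 * software state against the hardware. Hypothetical helper name.
 */
static bool example_dp_active_on_pipe(struct intel_encoder *encoder,
				      enum pipe expected)
{
	enum pipe pipe;

	return intel_dp_get_hw_state(encoder, &pipe) && pipe == expected;
}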
045ac3b5 2300static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2301 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2302{
2303 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2304 u32 tmp, flags = 0;
63000ef6
XZ
2305 struct drm_device *dev = encoder->base.dev;
2306 struct drm_i915_private *dev_priv = dev->dev_private;
2307 enum port port = dp_to_dig_port(intel_dp)->port;
2308 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2309 int dotclock;
045ac3b5 2310
9ed109a7 2311 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2312
2313 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2314
39e5fa88 2315 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2316 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2317
2318 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2319 flags |= DRM_MODE_FLAG_PHSYNC;
2320 else
2321 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2322
b81e34c2 2323 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2324 flags |= DRM_MODE_FLAG_PVSYNC;
2325 else
2326 flags |= DRM_MODE_FLAG_NVSYNC;
2327 } else {
39e5fa88 2328 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2329 flags |= DRM_MODE_FLAG_PHSYNC;
2330 else
2331 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2332
39e5fa88 2333 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2334 flags |= DRM_MODE_FLAG_PVSYNC;
2335 else
2336 flags |= DRM_MODE_FLAG_NVSYNC;
2337 }
045ac3b5 2338
2d112de7 2339 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2340
8c875fca
VS
2341 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2342 tmp & DP_COLOR_RANGE_16_235)
2343 pipe_config->limited_color_range = true;
2344
eb14cb74
VS
2345 pipe_config->has_dp_encoder = true;
2346
90a6b7b0
VS
2347 pipe_config->lane_count =
2348 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2349
eb14cb74
VS
2350 intel_dp_get_m_n(crtc, pipe_config);
2351
18442d08 2352 if (port == PORT_A) {
f1f644dc
JB
2353 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2354 pipe_config->port_clock = 162000;
2355 else
2356 pipe_config->port_clock = 270000;
2357 }
18442d08
VS
2358
2359 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2360 &pipe_config->dp_m_n);
2361
2362 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2363 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2364
2d112de7 2365 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2366
c6cd2ee2
JN
2367 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2368 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2369 /*
2370 * This is a big fat ugly hack.
2371 *
2372 * Some machines in UEFI boot mode provide us a VBT that has 18
2373 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2374 * unknown we fail to light up. Yet the same BIOS boots up with
2375 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2376 * max, not what it tells us to use.
2377 *
2378 * Note: This will still be broken if the eDP panel is not lit
2379 * up by the BIOS, and thus we can't get the mode at module
2380 * load.
2381 */
2382 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2383 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2384 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2385 }
045ac3b5
JB
2386}
2387
e8cb4558 2388static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2389{
e8cb4558 2390 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2391 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2392 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2393
6e3c9717 2394 if (crtc->config->has_audio)
495a5bb8 2395 intel_audio_codec_disable(encoder);
6cb49835 2396
b32c6f48
RV
2397 if (HAS_PSR(dev) && !HAS_DDI(dev))
2398 intel_psr_disable(intel_dp);
2399
6cb49835
DV
2400 /* Make sure the panel is off before trying to change the mode. But also
2401 * ensure that we have vdd while we switch off the panel. */
24f3e092 2402 intel_edp_panel_vdd_on(intel_dp);
4be73780 2403 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2404 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2405 intel_edp_panel_off(intel_dp);
3739850b 2406
08aff3fe
VS
2407 /* disable the port before the pipe on g4x */
2408 if (INTEL_INFO(dev)->gen < 5)
3739850b 2409 intel_dp_link_down(intel_dp);
d240f20f
JB
2410}
2411
08aff3fe 2412static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2413{
2bd2ad64 2414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2415 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2416
49277c31 2417 intel_dp_link_down(intel_dp);
08aff3fe
VS
2418 if (port == PORT_A)
2419 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2420}
2421
2422static void vlv_post_disable_dp(struct intel_encoder *encoder)
2423{
2424 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2425
2426 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2427}
2428
580d3811
VS
2429static void chv_post_disable_dp(struct intel_encoder *encoder)
2430{
2431 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2432 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2433 struct drm_device *dev = encoder->base.dev;
2434 struct drm_i915_private *dev_priv = dev->dev_private;
2435 struct intel_crtc *intel_crtc =
2436 to_intel_crtc(encoder->base.crtc);
2437 enum dpio_channel ch = vlv_dport_to_channel(dport);
2438 enum pipe pipe = intel_crtc->pipe;
2439 u32 val;
2440
2441 intel_dp_link_down(intel_dp);
2442
a580516d 2443 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2444
2445 /* Propagate soft reset to data lane reset */
97fd4d5c 2446 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2447 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2448 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2449
e0fce78f
VS
2450 if (intel_crtc->config->lane_count > 2) {
2451 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2452 val |= CHV_PCS_REQ_SOFTRESET_EN;
2453 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2454 }
97fd4d5c
VS
2455
2456 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2457 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2458 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2459
e0fce78f
VS
2460 if (intel_crtc->config->lane_count > 2) {
2461 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2462 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2463 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2464 }
580d3811 2465
a580516d 2466 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2467}
2468
7b13b58a
VS
2469static void
2470_intel_dp_set_link_train(struct intel_dp *intel_dp,
2471 uint32_t *DP,
2472 uint8_t dp_train_pat)
2473{
2474 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2475 struct drm_device *dev = intel_dig_port->base.base.dev;
2476 struct drm_i915_private *dev_priv = dev->dev_private;
2477 enum port port = intel_dig_port->port;
2478
2479 if (HAS_DDI(dev)) {
2480 uint32_t temp = I915_READ(DP_TP_CTL(port));
2481
2482 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2483 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2484 else
2485 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2486
2487 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2488 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2489 case DP_TRAINING_PATTERN_DISABLE:
2490 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2491
2492 break;
2493 case DP_TRAINING_PATTERN_1:
2494 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2495 break;
2496 case DP_TRAINING_PATTERN_2:
2497 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2498 break;
2499 case DP_TRAINING_PATTERN_3:
2500 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2501 break;
2502 }
2503 I915_WRITE(DP_TP_CTL(port), temp);
2504
39e5fa88
VS
2505 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2506 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2507 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2508
2509 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2510 case DP_TRAINING_PATTERN_DISABLE:
2511 *DP |= DP_LINK_TRAIN_OFF_CPT;
2512 break;
2513 case DP_TRAINING_PATTERN_1:
2514 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2515 break;
2516 case DP_TRAINING_PATTERN_2:
2517 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2518 break;
2519 case DP_TRAINING_PATTERN_3:
2520 DRM_ERROR("DP training pattern 3 not supported\n");
2521 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2522 break;
2523 }
2524
2525 } else {
2526 if (IS_CHERRYVIEW(dev))
2527 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2528 else
2529 *DP &= ~DP_LINK_TRAIN_MASK;
2530
2531 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2532 case DP_TRAINING_PATTERN_DISABLE:
2533 *DP |= DP_LINK_TRAIN_OFF;
2534 break;
2535 case DP_TRAINING_PATTERN_1:
2536 *DP |= DP_LINK_TRAIN_PAT_1;
2537 break;
2538 case DP_TRAINING_PATTERN_2:
2539 *DP |= DP_LINK_TRAIN_PAT_2;
2540 break;
2541 case DP_TRAINING_PATTERN_3:
2542 if (IS_CHERRYVIEW(dev)) {
2543 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2544 } else {
2545 DRM_ERROR("DP training pattern 3 not supported\n");
2546 *DP |= DP_LINK_TRAIN_PAT_2;
2547 }
2548 break;
2549 }
2550 }
2551}
2552
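/*
 * Editor's illustrative sketch, not part of the original source: the
 * source-side pattern selection above is only half of the story; the
 * sink has to be told the same training pattern over DPCD. Conceptually,
 * with a hypothetical helper name:
 */
static void example_set_pattern(struct intel_dp *intel_dp, uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* program the port register */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/* and tell the sink which pattern to expect */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
			   dp_train_pat);
}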
2553static void intel_dp_enable_port(struct intel_dp *intel_dp)
2554{
2555 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2556 struct drm_i915_private *dev_priv = dev->dev_private;
2557
7b13b58a
VS
2558 /* enable with pattern 1 (as per spec) */
2559 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2560 DP_TRAINING_PATTERN_1);
2561
2562 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2563 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2564
2565 /*
2566 * Magic for VLV/CHV. We _must_ first set up the register
2567 * without actually enabling the port, and then do another
2568 * write to enable the port. Otherwise link training will
2569 * fail when the power sequencer is freshly used for this port.
2570 */
2571 intel_dp->DP |= DP_PORT_EN;
2572
2573 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2574 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2575}
2576
e8cb4558 2577static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2578{
e8cb4558
DV
2579 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2580 struct drm_device *dev = encoder->base.dev;
2581 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2582 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2583 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2584
0c33d8d7
DV
2585 if (WARN_ON(dp_reg & DP_PORT_EN))
2586 return;
5d613501 2587
093e3f13
VS
2588 pps_lock(intel_dp);
2589
2590 if (IS_VALLEYVIEW(dev))
2591 vlv_init_panel_power_sequencer(intel_dp);
2592
7b13b58a 2593 intel_dp_enable_port(intel_dp);
093e3f13
VS
2594
2595 edp_panel_vdd_on(intel_dp);
2596 edp_panel_on(intel_dp);
2597 edp_panel_vdd_off(intel_dp, true);
2598
2599 pps_unlock(intel_dp);
2600
e0fce78f
VS
2601 if (IS_VALLEYVIEW(dev)) {
2602 unsigned int lane_mask = 0x0;
2603
2604 if (IS_CHERRYVIEW(dev))
2605 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2606
9b6de0a1
VS
2607 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2608 lane_mask);
e0fce78f 2609 }
61234fa5 2610
f01eca2e 2611 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2612 intel_dp_start_link_train(intel_dp);
33a34e4e 2613 intel_dp_complete_link_train(intel_dp);
3ab9c637 2614 intel_dp_stop_link_train(intel_dp);
c1dec79a 2615
6e3c9717 2616 if (crtc->config->has_audio) {
c1dec79a
JN
2617 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2618 pipe_name(crtc->pipe));
2619 intel_audio_codec_enable(encoder);
2620 }
ab1f90f9 2621}
89b667f8 2622
ecff4f3b
JN
2623static void g4x_enable_dp(struct intel_encoder *encoder)
2624{
828f5c6e
JN
2625 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2626
ecff4f3b 2627 intel_enable_dp(encoder);
4be73780 2628 intel_edp_backlight_on(intel_dp);
ab1f90f9 2629}
89b667f8 2630
ab1f90f9
JN
2631static void vlv_enable_dp(struct intel_encoder *encoder)
2632{
828f5c6e
JN
2633 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2634
4be73780 2635 intel_edp_backlight_on(intel_dp);
b32c6f48 2636 intel_psr_enable(intel_dp);
d240f20f
JB
2637}
2638
ecff4f3b 2639static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2640{
2641 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2642 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2643
8ac33ed3
DV
2644 intel_dp_prepare(encoder);
2645
d41f1efb
DV
2646 /* Only ilk+ has port A */
2647 if (dport->port == PORT_A) {
2648 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2649 ironlake_edp_pll_on(intel_dp);
d41f1efb 2650 }
ab1f90f9
JN
2651}
2652
83b84597
VS
2653static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2654{
2655 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2656 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2657 enum pipe pipe = intel_dp->pps_pipe;
2658 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2659
2660 edp_panel_vdd_off_sync(intel_dp);
2661
2662 /*
2663	 * VLV seems to get confused when multiple power sequencers
2664	 * have the same port selected (even if only one has power/vdd
2665	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2666	 * CHV, on the other hand, doesn't seem to mind having the same port
2667	 * selected in multiple power sequencers, but let's always clear the
2668	 * port select when logically disconnecting a power sequencer
2669	 * from a port.
2670 */
2671 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2672 pipe_name(pipe), port_name(intel_dig_port->port));
2673 I915_WRITE(pp_on_reg, 0);
2674 POSTING_READ(pp_on_reg);
2675
2676 intel_dp->pps_pipe = INVALID_PIPE;
2677}
2678
a4a5d2f8
VS
2679static void vlv_steal_power_sequencer(struct drm_device *dev,
2680 enum pipe pipe)
2681{
2682 struct drm_i915_private *dev_priv = dev->dev_private;
2683 struct intel_encoder *encoder;
2684
2685 lockdep_assert_held(&dev_priv->pps_mutex);
2686
ac3c12e4
VS
2687 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2688 return;
2689
a4a5d2f8
VS
2690 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2691 base.head) {
2692 struct intel_dp *intel_dp;
773538e8 2693 enum port port;
a4a5d2f8
VS
2694
2695 if (encoder->type != INTEL_OUTPUT_EDP)
2696 continue;
2697
2698 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2699 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2700
2701 if (intel_dp->pps_pipe != pipe)
2702 continue;
2703
2704 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2705 pipe_name(pipe), port_name(port));
a4a5d2f8 2706
e02f9a06 2707 WARN(encoder->base.crtc,
034e43c6
VS
2708 "stealing pipe %c power sequencer from active eDP port %c\n",
2709 pipe_name(pipe), port_name(port));
a4a5d2f8 2710
a4a5d2f8 2711 /* make sure vdd is off before we steal it */
83b84597 2712 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2713 }
2714}
2715
2716static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2717{
2718 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2719 struct intel_encoder *encoder = &intel_dig_port->base;
2720 struct drm_device *dev = encoder->base.dev;
2721 struct drm_i915_private *dev_priv = dev->dev_private;
2722 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2723
2724 lockdep_assert_held(&dev_priv->pps_mutex);
2725
093e3f13
VS
2726 if (!is_edp(intel_dp))
2727 return;
2728
a4a5d2f8
VS
2729 if (intel_dp->pps_pipe == crtc->pipe)
2730 return;
2731
2732 /*
2733 * If another power sequencer was being used on this
2734 * port previously make sure to turn off vdd there while
2735 * we still have control of it.
2736 */
2737 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2738 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2739
2740 /*
2741 * We may be stealing the power
2742 * sequencer from another port.
2743 */
2744 vlv_steal_power_sequencer(dev, crtc->pipe);
2745
2746 /* now it's all ours */
2747 intel_dp->pps_pipe = crtc->pipe;
2748
2749 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2750 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2751
2752 /* init power sequencer on this pipe and port */
36b5f425
VS
2753 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2754 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2755}
2756
ab1f90f9 2757static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2758{
2bd2ad64 2759 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2760 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2761 struct drm_device *dev = encoder->base.dev;
89b667f8 2762 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2763 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2764 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2765 int pipe = intel_crtc->pipe;
2766 u32 val;
a4fc5ed6 2767
a580516d 2768 mutex_lock(&dev_priv->sb_lock);
89b667f8 2769
ab3c759a 2770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2771 val = 0;
2772 if (pipe)
2773 val |= (1<<21);
2774 else
2775 val &= ~(1<<21);
2776 val |= 0x001000c4;
ab3c759a
CML
2777 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2778 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2779 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2780
a580516d 2781 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2782
2783 intel_enable_dp(encoder);
89b667f8
JB
2784}
2785
ecff4f3b 2786static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2787{
2788 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2789 struct drm_device *dev = encoder->base.dev;
2790 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2791 struct intel_crtc *intel_crtc =
2792 to_intel_crtc(encoder->base.crtc);
e4607fcf 2793 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2794 int pipe = intel_crtc->pipe;
89b667f8 2795
8ac33ed3
DV
2796 intel_dp_prepare(encoder);
2797
89b667f8 2798 /* Program Tx lane resets to default */
a580516d 2799 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2800 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2801 DPIO_PCS_TX_LANE2_RESET |
2802 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2804 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2805 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2806 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2807 DPIO_PCS_CLK_SOFT_RESET);
2808
2809 /* Fix up inter-pair skew failure */
ab3c759a
CML
2810 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2811 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2812 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2813 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2814}
2815
e4a1d846
CML
2816static void chv_pre_enable_dp(struct intel_encoder *encoder)
2817{
2818 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2819 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2820 struct drm_device *dev = encoder->base.dev;
2821 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2822 struct intel_crtc *intel_crtc =
2823 to_intel_crtc(encoder->base.crtc);
2824 enum dpio_channel ch = vlv_dport_to_channel(dport);
2825 int pipe = intel_crtc->pipe;
2e523e98 2826 int data, i, stagger;
949c1d43 2827 u32 val;
e4a1d846 2828
a580516d 2829 mutex_lock(&dev_priv->sb_lock);
949c1d43 2830
570e2a74
VS
2831 /* allow hardware to manage TX FIFO reset source */
2832 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2833 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2834 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2835
e0fce78f
VS
2836 if (intel_crtc->config->lane_count > 2) {
2837 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2838 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2839 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2840 }
570e2a74 2841
949c1d43 2842 /* Deassert soft data lane reset*/
97fd4d5c 2843 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2844 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2845 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2846
e0fce78f
VS
2847 if (intel_crtc->config->lane_count > 2) {
2848 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2849 val |= CHV_PCS_REQ_SOFTRESET_EN;
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2851 }
97fd4d5c
VS
2852
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2854 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2856
e0fce78f
VS
2857 if (intel_crtc->config->lane_count > 2) {
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2859 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2861 }
949c1d43
VS
2862
2863 /* Program Tx lane latency optimal setting*/
e0fce78f 2864 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2865 /* Set the upar bit */
e0fce78f
VS
2866 if (intel_crtc->config->lane_count == 1)
2867 data = 0x0;
2868 else
2869 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2870 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2871 data << DPIO_UPAR_SHIFT);
2872 }
2873
2874 /* Data lane stagger programming */
2e523e98
VS
2875 if (intel_crtc->config->port_clock > 270000)
2876 stagger = 0x18;
2877 else if (intel_crtc->config->port_clock > 135000)
2878 stagger = 0xd;
2879 else if (intel_crtc->config->port_clock > 67500)
2880 stagger = 0x7;
2881 else if (intel_crtc->config->port_clock > 33750)
2882 stagger = 0x4;
2883 else
2884 stagger = 0x2;
2885
2886 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2887 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2888 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2889
e0fce78f
VS
2890 if (intel_crtc->config->lane_count > 2) {
2891 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2892 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2893 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2894 }
2e523e98
VS
2895
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2897 DPIO_LANESTAGGER_STRAP(stagger) |
2898 DPIO_LANESTAGGER_STRAP_OVRD |
2899 DPIO_TX1_STAGGER_MASK(0x1f) |
2900 DPIO_TX1_STAGGER_MULT(6) |
2901 DPIO_TX2_STAGGER_MULT(0));
2902
e0fce78f
VS
2903 if (intel_crtc->config->lane_count > 2) {
2904 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2905 DPIO_LANESTAGGER_STRAP(stagger) |
2906 DPIO_LANESTAGGER_STRAP_OVRD |
2907 DPIO_TX1_STAGGER_MASK(0x1f) |
2908 DPIO_TX1_STAGGER_MULT(7) |
2909 DPIO_TX2_STAGGER_MULT(5));
2910 }
e4a1d846 2911
a580516d 2912 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2913
e4a1d846 2914 intel_enable_dp(encoder);
b0b33846
VS
2915
2916 /* Second common lane will stay alive on its own now */
2917 if (dport->release_cl2_override) {
2918 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2919 dport->release_cl2_override = false;
2920 }
e4a1d846
CML
2921}
2922
9197c88b
VS
2923static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2924{
2925 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2926 struct drm_device *dev = encoder->base.dev;
2927 struct drm_i915_private *dev_priv = dev->dev_private;
2928 struct intel_crtc *intel_crtc =
2929 to_intel_crtc(encoder->base.crtc);
2930 enum dpio_channel ch = vlv_dport_to_channel(dport);
2931 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
2932 unsigned int lane_mask =
2933 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
2934 u32 val;
2935
625695f8
VS
2936 intel_dp_prepare(encoder);
2937
b0b33846
VS
2938 /*
2939 * Must trick the second common lane into life.
2940 * Otherwise we can't even access the PLL.
2941 */
2942 if (ch == DPIO_CH0 && pipe == PIPE_B)
2943 dport->release_cl2_override =
2944 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2945
e0fce78f
VS
2946 chv_phy_powergate_lanes(encoder, true, lane_mask);
2947
a580516d 2948 mutex_lock(&dev_priv->sb_lock);
9197c88b 2949
b9e5ac3c
VS
2950 /* program left/right clock distribution */
2951 if (pipe != PIPE_B) {
2952 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2953 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2954 if (ch == DPIO_CH0)
2955 val |= CHV_BUFLEFTENA1_FORCE;
2956 if (ch == DPIO_CH1)
2957 val |= CHV_BUFRIGHTENA1_FORCE;
2958 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2959 } else {
2960 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2961 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2962 if (ch == DPIO_CH0)
2963 val |= CHV_BUFLEFTENA2_FORCE;
2964 if (ch == DPIO_CH1)
2965 val |= CHV_BUFRIGHTENA2_FORCE;
2966 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2967 }
2968
9197c88b
VS
2969 /* program clock channel usage */
2970 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2971 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2972 if (pipe != PIPE_B)
2973 val &= ~CHV_PCS_USEDCLKCHANNEL;
2974 else
2975 val |= CHV_PCS_USEDCLKCHANNEL;
2976 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2977
e0fce78f
VS
2978 if (intel_crtc->config->lane_count > 2) {
2979 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2980 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2981 if (pipe != PIPE_B)
2982 val &= ~CHV_PCS_USEDCLKCHANNEL;
2983 else
2984 val |= CHV_PCS_USEDCLKCHANNEL;
2985 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2986 }
9197c88b
VS
2987
2988 /*
2989	 * This is a bit weird since generally CL
2990 * matches the pipe, but here we need to
2991 * pick the CL based on the port.
2992 */
2993 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2994 if (pipe != PIPE_B)
2995 val &= ~CHV_CMN_USEDCLKCHANNEL;
2996 else
2997 val |= CHV_CMN_USEDCLKCHANNEL;
2998 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2999
a580516d 3000 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3001}
3002
d6db995f
VS
3003static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3004{
3005 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3006 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3007 u32 val;
3008
3009 mutex_lock(&dev_priv->sb_lock);
3010
3011 /* disable left/right clock distribution */
3012 if (pipe != PIPE_B) {
3013 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3014 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3015 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3016 } else {
3017 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3018 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3019 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3020 }
3021
3022 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3023
b0b33846
VS
3024 /*
3025 * Leave the power down bit cleared for at least one
3026	 * lane so that chv_phy_powergate_ch() will power
3027	 * on something when the channel is otherwise unused.
3028	 * When the port is off and the override is removed
3029	 * the lanes power down anyway, so it doesn't
3030	 * really matter what the state of the power down bits is
3031 * after this.
3032 */
e0fce78f 3033 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3034}
3035
a4fc5ed6 3036/*
df0c237d
JB
3037 * Native read with retry for link status and receiver capability reads for
3038 * cases where the sink may still be asleep.
9d1a1031
JN
3039 *
3040 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3041 * supposed to retry 3 times per the spec.
a4fc5ed6 3042 */
9d1a1031
JN
3043static ssize_t
3044intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3045 void *buffer, size_t size)
a4fc5ed6 3046{
9d1a1031
JN
3047 ssize_t ret;
3048 int i;
61da5fab 3049
f6a19066
VS
3050 /*
3051 * Sometime we just get the same incorrect byte repeated
3052 * over the entire buffer. Doing just one throw away read
3053 * initially seems to "solve" it.
3054 */
3055 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3056
61da5fab 3057 for (i = 0; i < 3; i++) {
9d1a1031
JN
3058 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3059 if (ret == size)
3060 return ret;
61da5fab
JB
3061 msleep(1);
3062 }
a4fc5ed6 3063
9d1a1031 3064 return ret;
a4fc5ed6
KP
3065}
3066
3067/*
3068 * Fetch AUX CH registers 0x202 - 0x207 which contain
3069 * link status information
3070 */
3071static bool
93f62dad 3072intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3073{
9d1a1031
JN
3074 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3075 DP_LANE0_1_STATUS,
3076 link_status,
3077 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3078}
3079
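/*
 * Editor's illustrative sketch, not part of the original source: a
 * typical consumer reads the six status bytes and feeds them to the
 * generic DRM DP helpers, e.g. to decide whether retraining is needed.
 * Hypothetical helper name; the lane count is assumed to be the
 * negotiated value kept in intel_dp->lane_count.
 */
static bool example_link_is_trained(struct intel_dp *intel_dp)
{
	uint8_t link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	return drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}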
1100244e 3080/* These are source-specific values. */
a4fc5ed6 3081static uint8_t
1a2eb460 3082intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3083{
30add22d 3084 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3085 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3086 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3087
9314726b
VK
3088 if (IS_BROXTON(dev))
3089 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3090 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3091 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3092 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3093 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3094 } else if (IS_VALLEYVIEW(dev))
bd60018a 3095 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3096 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3097 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3098 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3099 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3100 else
bd60018a 3101 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3102}
3103
3104static uint8_t
3105intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3106{
30add22d 3107 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3108 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3109
5a9d1f1a
DL
3110 if (INTEL_INFO(dev)->gen >= 9) {
3111 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3112 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3113 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3115 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3117 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3119 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3120 default:
3121 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3122 }
3123 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3124 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3126 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3130 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3131 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3132 default:
bd60018a 3133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3134 }
e2fa6fba
P
3135 } else if (IS_VALLEYVIEW(dev)) {
3136 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3144 default:
bd60018a 3145 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3146 }
bc7d38a4 3147 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3148 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3149 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3150 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3153 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3154 default:
bd60018a 3155 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3156 }
3157 } else {
3158 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3160 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3162 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3164 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3165 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3166 default:
bd60018a 3167 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3168 }
a4fc5ed6
KP
3169 }
3170}
3171
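/*
 * Editor's illustrative sketch, not part of the original source: the two
 * helpers above cap what the sink may request during link training; a
 * requested voltage swing is clamped against the source maximum roughly
 * like this. Hypothetical helper name.
 */
static uint8_t example_clamp_vswing(struct intel_dp *intel_dp, uint8_t v)
{
	uint8_t max = intel_dp_voltage_max(intel_dp);

	if (v >= max)
		v = max | DP_TRAIN_MAX_SWING_REACHED;

	return v;
}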
5829975c 3172static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3173{
3174 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3175 struct drm_i915_private *dev_priv = dev->dev_private;
3176 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3177 struct intel_crtc *intel_crtc =
3178 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3179 unsigned long demph_reg_value, preemph_reg_value,
3180 uniqtranscale_reg_value;
3181 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3182 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3183 int pipe = intel_crtc->pipe;
e2fa6fba
P
3184
3185 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3186 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3187 preemph_reg_value = 0x0004000;
3188 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3189 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3190 demph_reg_value = 0x2B405555;
3191 uniqtranscale_reg_value = 0x552AB83A;
3192 break;
bd60018a 3193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3194 demph_reg_value = 0x2B404040;
3195 uniqtranscale_reg_value = 0x5548B83A;
3196 break;
bd60018a 3197 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3198 demph_reg_value = 0x2B245555;
3199 uniqtranscale_reg_value = 0x5560B83A;
3200 break;
bd60018a 3201 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3202 demph_reg_value = 0x2B405555;
3203 uniqtranscale_reg_value = 0x5598DA3A;
3204 break;
3205 default:
3206 return 0;
3207 }
3208 break;
bd60018a 3209 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3210 preemph_reg_value = 0x0002000;
3211 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3213 demph_reg_value = 0x2B404040;
3214 uniqtranscale_reg_value = 0x5552B83A;
3215 break;
bd60018a 3216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3217 demph_reg_value = 0x2B404848;
3218 uniqtranscale_reg_value = 0x5580B83A;
3219 break;
bd60018a 3220 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3221 demph_reg_value = 0x2B404040;
3222 uniqtranscale_reg_value = 0x55ADDA3A;
3223 break;
3224 default:
3225 return 0;
3226 }
3227 break;
bd60018a 3228 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3229 preemph_reg_value = 0x0000000;
3230 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3232 demph_reg_value = 0x2B305555;
3233 uniqtranscale_reg_value = 0x5570B83A;
3234 break;
bd60018a 3235 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3236 demph_reg_value = 0x2B2B4040;
3237 uniqtranscale_reg_value = 0x55ADDA3A;
3238 break;
3239 default:
3240 return 0;
3241 }
3242 break;
bd60018a 3243 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3244 preemph_reg_value = 0x0006000;
3245 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3247 demph_reg_value = 0x1B405555;
3248 uniqtranscale_reg_value = 0x55ADDA3A;
3249 break;
3250 default:
3251 return 0;
3252 }
3253 break;
3254 default:
3255 return 0;
3256 }
3257
a580516d 3258 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3259 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3260 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3261 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3262 uniqtranscale_reg_value);
ab3c759a
CML
3263 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3264 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3265 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3267 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3268
3269 return 0;
3270}
3271
67fa24b4
VS
3272static bool chv_need_uniq_trans_scale(uint8_t train_set)
3273{
3274 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3275 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3276}
3277
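/*
 * Translate the requested voltage swing / pre-emphasis into CHV PHY
 * de-emphasis and swing-margin values and program them per lane over DPIO:
 * clear and later restart the swing calculation, write deemph/margin for
 * each lane, and apply the unique transition scale handling above.
 */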
5829975c 3278static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3279{
3280 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3281 struct drm_i915_private *dev_priv = dev->dev_private;
3282 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3283 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3284 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3285 uint8_t train_set = intel_dp->train_set[0];
3286 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3287 enum pipe pipe = intel_crtc->pipe;
3288 int i;
e4a1d846
CML
3289
3290 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3291 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3292 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3294 deemph_reg_value = 128;
3295 margin_reg_value = 52;
3296 break;
bd60018a 3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3298 deemph_reg_value = 128;
3299 margin_reg_value = 77;
3300 break;
bd60018a 3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3302 deemph_reg_value = 128;
3303 margin_reg_value = 102;
3304 break;
bd60018a 3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3306 deemph_reg_value = 128;
3307 margin_reg_value = 154;
3308 /* FIXME extra to set for 1200 */
3309 break;
3310 default:
3311 return 0;
3312 }
3313 break;
bd60018a 3314 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3315 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3317 deemph_reg_value = 85;
3318 margin_reg_value = 78;
3319 break;
bd60018a 3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3321 deemph_reg_value = 85;
3322 margin_reg_value = 116;
3323 break;
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3325 deemph_reg_value = 85;
3326 margin_reg_value = 154;
3327 break;
3328 default:
3329 return 0;
3330 }
3331 break;
bd60018a 3332 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3333 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3335 deemph_reg_value = 64;
3336 margin_reg_value = 104;
3337 break;
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3339 deemph_reg_value = 64;
3340 margin_reg_value = 154;
3341 break;
3342 default:
3343 return 0;
3344 }
3345 break;
bd60018a 3346 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3347 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3349 deemph_reg_value = 43;
3350 margin_reg_value = 154;
3351 break;
3352 default:
3353 return 0;
3354 }
3355 break;
3356 default:
3357 return 0;
3358 }
3359
a580516d 3360 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3361
3362 /* Clear calc init */
1966e59e
VS
3363 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3364 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3365 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3366 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3367 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3368
e0fce78f
VS
3369 if (intel_crtc->config->lane_count > 2) {
3370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3371 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3372 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3373 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3374 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3375 }
e4a1d846 3376
a02ef3c7
VS
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3378 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3379 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3380 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3381
e0fce78f
VS
3382 if (intel_crtc->config->lane_count > 2) {
3383 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3384 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3385 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3386 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3387 }
a02ef3c7 3388
e4a1d846 3389 /* Program swing deemph */
e0fce78f 3390 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3391 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3392 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3393 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3394 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3395 }
e4a1d846
CML
3396
3397 /* Program swing margin */
e0fce78f 3398 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3399 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3400
1fb44505
VS
3401 val &= ~DPIO_SWING_MARGIN000_MASK;
3402 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3403
3404 /*
3405 * Supposedly this value shouldn't matter when unique transition
3406 * scale is disabled, but in fact it does matter. Let's just
3407 * always program the same value and hope it's OK.
3408 */
3409 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3410 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3411
f72df8db
VS
3412 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3413 }
e4a1d846 3414
67fa24b4
VS
3415 /*
3416 * The document said it needs to set bit 27 for ch0 and bit 26
3417 * for ch1. Might be a typo in the doc.
3418 * For now, for this unique transition scale selection, set bit
3419 * 27 for ch0 and ch1.
3420 */
e0fce78f 3421 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3422 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3423 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3424 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3425 else
3426 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3427 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3428 }
3429
3430 /* Start swing calculation */
1966e59e
VS
3431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3432 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3434
e0fce78f
VS
3435 if (intel_crtc->config->lane_count > 2) {
3436 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3437 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3438 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3439 }
e4a1d846
CML
3440
3441 /* LRC Bypass */
3442 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3443 val |= DPIO_LRC_BYPASS;
3444 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3445
a580516d 3446 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3447
3448 return 0;
3449}
3450
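/*
 * Pick the highest voltage swing and pre-emphasis requested across the
 * active lanes from the DPCD adjust request, clamp them to what the source
 * supports, and set the MAX_SWING / MAX_PRE_EMPHASIS flags once the limit
 * has been reached.
 */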
a4fc5ed6 3451static void
0301b3ac
JN
3452intel_get_adjust_train(struct intel_dp *intel_dp,
3453 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3454{
3455 uint8_t v = 0;
3456 uint8_t p = 0;
3457 int lane;
1a2eb460
KP
3458 uint8_t voltage_max;
3459 uint8_t preemph_max;
a4fc5ed6 3460
901c2daf 3461 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3462 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3463 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3464
3465 if (this_v > v)
3466 v = this_v;
3467 if (this_p > p)
3468 p = this_p;
3469 }
3470
1a2eb460 3471 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3472 if (v >= voltage_max)
3473 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3474
1a2eb460
KP
3475 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3476 if (p >= preemph_max)
3477 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3478
3479 for (lane = 0; lane < 4; lane++)
33a34e4e 3480 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3481}
3482
3483static uint32_t
5829975c 3484gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3485{
3cf2efb1 3486 uint32_t signal_levels = 0;
a4fc5ed6 3487
3cf2efb1 3488 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3490 default:
3491 signal_levels |= DP_VOLTAGE_0_4;
3492 break;
bd60018a 3493 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3494 signal_levels |= DP_VOLTAGE_0_6;
3495 break;
bd60018a 3496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3497 signal_levels |= DP_VOLTAGE_0_8;
3498 break;
bd60018a 3499 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3500 signal_levels |= DP_VOLTAGE_1_2;
3501 break;
3502 }
3cf2efb1 3503 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3504 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3505 default:
3506 signal_levels |= DP_PRE_EMPHASIS_0;
3507 break;
bd60018a 3508 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3509 signal_levels |= DP_PRE_EMPHASIS_3_5;
3510 break;
bd60018a 3511 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3512 signal_levels |= DP_PRE_EMPHASIS_6;
3513 break;
bd60018a 3514 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3515 signal_levels |= DP_PRE_EMPHASIS_9_5;
3516 break;
3517 }
3518 return signal_levels;
3519}
3520
e3421a18
ZW
3521/* Gen6's DP voltage swing and pre-emphasis control */
3522static uint32_t
5829975c 3523gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3524{
3c5a62b5
YL
3525 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3526 DP_TRAIN_PRE_EMPHASIS_MASK);
3527 switch (signal_levels) {
bd60018a
SJ
3528 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3530 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3531 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3532 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3535 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3537 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3538 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3539 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3541 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3542 default:
3c5a62b5
YL
3543 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3544 "0x%x\n", signal_levels);
3545 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3546 }
3547}
3548
1a2eb460
KP
3549/* Gen7's DP voltage swing and pre-emphasis control */
3550static uint32_t
5829975c 3551gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3552{
3553 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3554 DP_TRAIN_PRE_EMPHASIS_MASK);
3555 switch (signal_levels) {
bd60018a 3556 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3557 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3558 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3559 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3560 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3561 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3562
bd60018a 3563 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3564 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3565 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3566 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3567
bd60018a 3568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3569 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3570 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3571 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3572
3573 default:
3574 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3575 "0x%x\n", signal_levels);
3576 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3577 }
3578}
3579
f0a3424e
PZ
3580/* Properly updates "DP" with the correct signal levels. */
3581static void
3582intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3583{
3584 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3585 enum port port = intel_dig_port->port;
f0a3424e 3586 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3587 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3588 uint8_t train_set = intel_dp->train_set[0];
3589
f8896f5d
DW
3590 if (HAS_DDI(dev)) {
3591 signal_levels = ddi_signal_levels(intel_dp);
3592
3593 if (IS_BROXTON(dev))
3594 signal_levels = 0;
3595 else
3596 mask = DDI_BUF_EMP_MASK;
e4a1d846 3597 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3598 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3599 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3600 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3601 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3602 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3603 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3604 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3605 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3606 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3607 } else {
5829975c 3608 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3609 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3610 }
3611
96fb9f9b
VK
3612 if (mask)
3613 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3614
3615 DRM_DEBUG_KMS("Using vswing level %d\n",
3616 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3617 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3618 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3619 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3620
3621 *DP = (*DP & ~mask) | signal_levels;
3622}
3623
a4fc5ed6 3624static bool
ea5b213a 3625intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3626 uint32_t *DP,
58e10eb9 3627 uint8_t dp_train_pat)
a4fc5ed6 3628{
174edf1f 3629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3630 struct drm_i915_private *dev_priv =
3631 to_i915(intel_dig_port->base.base.dev);
2cdfe6c8
JN
3632 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3633 int ret, len;
a4fc5ed6 3634
7b13b58a 3635 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3636
70aff66c 3637 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3638 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3639
2cdfe6c8
JN
3640 buf[0] = dp_train_pat;
3641 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3642 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3643 /* don't write DP_TRAINING_LANEx_SET on disable */
3644 len = 1;
3645 } else {
3646 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
901c2daf
VS
3647 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3648 len = intel_dp->lane_count + 1;
47ea7542 3649 }
a4fc5ed6 3650
9d1a1031
JN
3651 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3652 buf, len);
2cdfe6c8
JN
3653
3654 return ret == len;
a4fc5ed6
KP
3655}
3656
70aff66c
JN
3657static bool
3658intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3659 uint8_t dp_train_pat)
3660{
4e96c977
MK
3661 if (!intel_dp->train_set_valid)
3662 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3663 intel_dp_set_signal_levels(intel_dp, DP);
3664 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3665}
3666
3667static bool
3668intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3669 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3670{
3671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3672 struct drm_i915_private *dev_priv =
3673 to_i915(intel_dig_port->base.base.dev);
70aff66c
JN
3674 int ret;
3675
3676 intel_get_adjust_train(intel_dp, link_status);
3677 intel_dp_set_signal_levels(intel_dp, DP);
3678
3679 I915_WRITE(intel_dp->output_reg, *DP);
3680 POSTING_READ(intel_dp->output_reg);
3681
9d1a1031 3682 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
901c2daf 3683 intel_dp->train_set, intel_dp->lane_count);
70aff66c 3684
901c2daf 3685 return ret == intel_dp->lane_count;
70aff66c
JN
3686}
3687
3ab9c637
ID
3688static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3689{
3690 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3691 struct drm_device *dev = intel_dig_port->base.base.dev;
3692 struct drm_i915_private *dev_priv = dev->dev_private;
3693 enum port port = intel_dig_port->port;
3694 uint32_t val;
3695
3696 if (!HAS_DDI(dev))
3697 return;
3698
3699 val = I915_READ(DP_TP_CTL(port));
3700 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3701 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3702 I915_WRITE(DP_TP_CTL(port), val);
3703
3704 /*
3705 * On PORT_A we can have only eDP in SST mode. There the only reason
3706 * we need to set idle transmission mode is to work around a HW issue
3707 * where we enable the pipe while not in idle link-training mode.
 3708 * In this case there is a requirement to wait for a minimum number of
3709 * idle patterns to be sent.
3710 */
3711 if (port == PORT_A)
3712 return;
3713
3714 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3715 1))
3716 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3717}
3718
33a34e4e 3719/* Enable corresponding port and start training pattern 1 */
c19b0669 3720void
33a34e4e 3721intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3722{
da63a9f2 3723 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3724 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3725 int i;
3726 uint8_t voltage;
cdb0e95b 3727 int voltage_tries, loop_tries;
ea5b213a 3728 uint32_t DP = intel_dp->DP;
6aba5b6c 3729 uint8_t link_config[2];
04a60f9f 3730 uint8_t link_bw, rate_select;
a4fc5ed6 3731
affa9354 3732 if (HAS_DDI(dev))
c19b0669
PZ
3733 intel_ddi_prepare_link_retrain(encoder);
3734
901c2daf 3735 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
04a60f9f
VS
3736 &link_bw, &rate_select);
3737
3cf2efb1 3738 /* Write the link configuration data */
04a60f9f 3739 link_config[0] = link_bw;
901c2daf 3740 link_config[1] = intel_dp->lane_count;
6aba5b6c
JN
3741 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3742 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3743 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3744 if (intel_dp->num_sink_rates)
a8f3ef61 3745 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
04a60f9f 3746 &rate_select, 1);
6aba5b6c
JN
3747
3748 link_config[0] = 0;
3749 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3750 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3751
3752 DP |= DP_PORT_EN;
1a2eb460 3753
70aff66c
JN
3754 /* clock recovery */
3755 if (!intel_dp_reset_link_train(intel_dp, &DP,
3756 DP_TRAINING_PATTERN_1 |
3757 DP_LINK_SCRAMBLING_DISABLE)) {
3758 DRM_ERROR("failed to enable link training\n");
3759 return;
3760 }
3761
a4fc5ed6 3762 voltage = 0xff;
cdb0e95b
KP
3763 voltage_tries = 0;
3764 loop_tries = 0;
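	/*
	 * Clock recovery loop: keep adjusting the drive settings as requested
	 * by the sink until CR is reported done. Give up after 5 tries at the
	 * same voltage swing, or after 5 full resets once every lane has hit
	 * max swing.
	 */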
a4fc5ed6 3765 for (;;) {
70aff66c 3766 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3767
a7c9655f 3768 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3769 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3770 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3771 break;
93f62dad 3772 }
a4fc5ed6 3773
901c2daf 3774 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3775 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3776 break;
3777 }
3778
4e96c977
MK
3779 /*
3780 * if we used previously trained voltage and pre-emphasis values
3781 * and we don't get clock recovery, reset link training values
3782 */
3783 if (intel_dp->train_set_valid) {
3784 DRM_DEBUG_KMS("clock recovery not ok, reset");
3785 /* clear the flag as we are not reusing train set */
3786 intel_dp->train_set_valid = false;
3787 if (!intel_dp_reset_link_train(intel_dp, &DP,
3788 DP_TRAINING_PATTERN_1 |
3789 DP_LINK_SCRAMBLING_DISABLE)) {
3790 DRM_ERROR("failed to enable link training\n");
3791 return;
3792 }
3793 continue;
3794 }
3795
3cf2efb1 3796 /* Check to see if we've tried the max voltage */
901c2daf 3797 for (i = 0; i < intel_dp->lane_count; i++)
3cf2efb1 3798 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3799 break;
901c2daf 3800 if (i == intel_dp->lane_count) {
b06fbda3
DV
3801 ++loop_tries;
3802 if (loop_tries == 5) {
3def84b3 3803 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3804 break;
3805 }
70aff66c
JN
3806 intel_dp_reset_link_train(intel_dp, &DP,
3807 DP_TRAINING_PATTERN_1 |
3808 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3809 voltage_tries = 0;
3810 continue;
3811 }
a4fc5ed6 3812
3cf2efb1 3813 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3814 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3815 ++voltage_tries;
b06fbda3 3816 if (voltage_tries == 5) {
3def84b3 3817 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3818 break;
3819 }
3820 } else
3821 voltage_tries = 0;
3822 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3823
70aff66c
JN
3824 /* Update training set as requested by target */
3825 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3826 DRM_ERROR("failed to update link training\n");
3827 break;
3828 }
a4fc5ed6
KP
3829 }
3830
33a34e4e
JB
3831 intel_dp->DP = DP;
3832}
3833
c19b0669 3834void
33a34e4e
JB
3835intel_dp_complete_link_train(struct intel_dp *intel_dp)
3836{
33a34e4e 3837 bool channel_eq = false;
37f80975 3838 int tries, cr_tries;
33a34e4e 3839 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3840 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3841
a79b8165 3842 /* Training Pattern 3 for HBR2, or for DP 1.2 devices that support it */
901c2daf 3843 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
06ea66b6 3844 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3845
a4fc5ed6 3846 /* channel equalization */
70aff66c 3847 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3848 training_pattern |
70aff66c
JN
3849 DP_LINK_SCRAMBLING_DISABLE)) {
3850 DRM_ERROR("failed to start channel equalization\n");
3851 return;
3852 }
3853
a4fc5ed6 3854 tries = 0;
37f80975 3855 cr_tries = 0;
a4fc5ed6
KP
3856 channel_eq = false;
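	/*
	 * Channel equalization loop: bail out entirely once cr_tries exceeds
	 * 5, restart clock recovery whenever CR is lost, and fall back to a
	 * full retrain after more than 5 equalization attempts.
	 */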
3857 for (;;) {
70aff66c 3858 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3859
37f80975
JB
3860 if (cr_tries > 5) {
3861 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3862 break;
3863 }
3864
a7c9655f 3865 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3866 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3867 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3868 break;
70aff66c 3869 }
a4fc5ed6 3870
37f80975 3871 /* Make sure clock is still ok */
90a6b7b0 3872 if (!drm_dp_clock_recovery_ok(link_status,
901c2daf 3873 intel_dp->lane_count)) {
4e96c977 3874 intel_dp->train_set_valid = false;
37f80975 3875 intel_dp_start_link_train(intel_dp);
70aff66c 3876 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3877 training_pattern |
70aff66c 3878 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3879 cr_tries++;
3880 continue;
3881 }
3882
90a6b7b0 3883 if (drm_dp_channel_eq_ok(link_status,
901c2daf 3884 intel_dp->lane_count)) {
3cf2efb1
CW
3885 channel_eq = true;
3886 break;
3887 }
a4fc5ed6 3888
37f80975
JB
3889 /* Try 5 times, then try clock recovery if that fails */
3890 if (tries > 5) {
4e96c977 3891 intel_dp->train_set_valid = false;
37f80975 3892 intel_dp_start_link_train(intel_dp);
70aff66c 3893 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3894 training_pattern |
70aff66c 3895 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3896 tries = 0;
3897 cr_tries++;
3898 continue;
3899 }
a4fc5ed6 3900
70aff66c
JN
3901 /* Update training set as requested by target */
3902 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3903 DRM_ERROR("failed to update link training\n");
3904 break;
3905 }
3cf2efb1 3906 ++tries;
869184a6 3907 }
3cf2efb1 3908
3ab9c637
ID
3909 intel_dp_set_idle_link_train(intel_dp);
3910
3911 intel_dp->DP = DP;
3912
4e96c977 3913 if (channel_eq) {
5fa836a9 3914 intel_dp->train_set_valid = true;
07f42258 3915 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3916 }
3ab9c637
ID
3917}
3918
3919void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3920{
70aff66c 3921 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3922 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3923}
3924
3925static void
ea5b213a 3926intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3927{
da63a9f2 3928 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3929 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3930 enum port port = intel_dig_port->port;
da63a9f2 3931 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3932 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3933 uint32_t DP = intel_dp->DP;
a4fc5ed6 3934
bc76e320 3935 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3936 return;
3937
0c33d8d7 3938 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3939 return;
3940
28c97730 3941 DRM_DEBUG_KMS("\n");
32f9d658 3942
39e5fa88
VS
3943 if ((IS_GEN7(dev) && port == PORT_A) ||
3944 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3945 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3946 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3947 } else {
aad3d14d
VS
3948 if (IS_CHERRYVIEW(dev))
3949 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3950 else
3951 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3952 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3953 }
1612c8bd 3954 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3955 POSTING_READ(intel_dp->output_reg);
5eb08b69 3956
1612c8bd
VS
3957 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3958 I915_WRITE(intel_dp->output_reg, DP);
3959 POSTING_READ(intel_dp->output_reg);
3960
3961 /*
 3962 * HW workaround for IBX: we need to move the port
3963 * to transcoder A after disabling it to allow the
3964 * matching HDMI port to be enabled on transcoder A.
3965 */
3966 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3967 /* always enable with pattern 1 (as per spec) */
3968 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3969 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3970 I915_WRITE(intel_dp->output_reg, DP);
3971 POSTING_READ(intel_dp->output_reg);
3972
3973 DP &= ~DP_PORT_EN;
5bddd17f 3974 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3975 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3976 }
3977
f01eca2e 3978 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3979}
3980
26d61aad
KP
3981static bool
3982intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3983{
a031d709
RV
3984 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3985 struct drm_device *dev = dig_port->base.base.dev;
3986 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3987 uint8_t rev;
a031d709 3988
9d1a1031
JN
3989 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3990 sizeof(intel_dp->dpcd)) < 0)
edb39244 3991 return false; /* aux transfer failed */
92fd8fd1 3992
a8e98153 3993 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3994
edb39244
AJ
3995 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3996 return false; /* DPCD not present */
3997
2293bb5c
SK
3998 /* Check if the panel supports PSR */
3999 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 4000 if (is_edp(intel_dp)) {
9d1a1031
JN
4001 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4002 intel_dp->psr_dpcd,
4003 sizeof(intel_dp->psr_dpcd));
a031d709
RV
4004 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4005 dev_priv->psr.sink_support = true;
50003939 4006 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 4007 }
474d1ec4
SJ
4008
4009 if (INTEL_INFO(dev)->gen >= 9 &&
4010 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4011 uint8_t frame_sync_cap;
4012
4013 dev_priv->psr.sink_support = true;
4014 intel_dp_dpcd_read_wake(&intel_dp->aux,
4015 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4016 &frame_sync_cap, 1);
4017 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4018 /* PSR2 needs frame sync as well */
4019 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4020 DRM_DEBUG_KMS("PSR2 %s on sink",
4021 dev_priv->psr.psr2_support ? "supported" : "not supported");
4022 }
50003939
JN
4023 }
4024
7809a611 4025 /* Training Pattern 3 support, both source and sink */
caa860d9 4026 if (drm_dp_tps3_supported(intel_dp->dpcd) &&
7809a611 4027 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 4028 intel_dp->use_tps3 = true;
f8d8a672 4029 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
4030 } else
4031 intel_dp->use_tps3 = false;
4032
fc0f8e25
SJ
4033 /* Intermediate frequency support */
4034 if (is_edp(intel_dp) &&
4035 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4036 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 4037 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 4038 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
4039 int i;
4040
fc0f8e25
SJ
4041 intel_dp_dpcd_read_wake(&intel_dp->aux,
4042 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
4043 sink_rates,
4044 sizeof(sink_rates));
ea2d8a42 4045
94ca719e
VS
4046 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4047 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
4048
4049 if (val == 0)
4050 break;
4051
af77b974
SJ
 4052 /* Value read is in units of 200 kHz, while drm clocks are stored in deca-kHz */
4053 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 4054 }
94ca719e 4055 intel_dp->num_sink_rates = i;
fc0f8e25 4056 }
0336400e
VS
4057
4058 intel_dp_print_rates(intel_dp);
4059
edb39244
AJ
4060 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4061 DP_DWN_STRM_PORT_PRESENT))
4062 return true; /* native DP sink */
4063
4064 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4065 return true; /* no per-port downstream info */
4066
9d1a1031
JN
4067 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4068 intel_dp->downstream_ports,
4069 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
4070 return false; /* downstream port status fetch failed */
4071
4072 return true;
92fd8fd1
KP
4073}
4074
0d198328
AJ
4075static void
4076intel_dp_probe_oui(struct intel_dp *intel_dp)
4077{
4078 u8 buf[3];
4079
4080 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4081 return;
4082
9d1a1031 4083 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4084 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4085 buf[0], buf[1], buf[2]);
4086
9d1a1031 4087 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4088 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4089 buf[0], buf[1], buf[2]);
4090}
4091
0e32b39c
DA
4092static bool
4093intel_dp_probe_mst(struct intel_dp *intel_dp)
4094{
4095 u8 buf[1];
4096
4097 if (!intel_dp->can_mst)
4098 return false;
4099
4100 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4101 return false;
4102
0e32b39c
DA
4103 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4104 if (buf[0] & DP_MST_CAP) {
4105 DRM_DEBUG_KMS("Sink is MST capable\n");
4106 intel_dp->is_mst = true;
4107 } else {
4108 DRM_DEBUG_KMS("Sink is not MST capable\n");
4109 intel_dp->is_mst = false;
4110 }
4111 }
0e32b39c
DA
4112
4113 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4114 return intel_dp->is_mst;
4115}
4116
e5a1cab5 4117static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4118{
082dcc7c
RV
4119 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4120 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4121 u8 buf;
e5a1cab5 4122 int ret = 0;
d2e216d0 4123
082dcc7c
RV
4124 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4125 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4126 ret = -EIO;
4127 goto out;
4373f0f2
PZ
4128 }
4129
082dcc7c 4130 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4131 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4132 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4133 ret = -EIO;
4134 goto out;
4135 }
d2e216d0 4136
621d4c76 4137 intel_dp->sink_crc.started = false;
e5a1cab5 4138 out:
082dcc7c 4139 hsw_enable_ips(intel_crtc);
e5a1cab5 4140 return ret;
082dcc7c
RV
4141}
4142
4143static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4144{
4145 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4146 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4147 u8 buf;
e5a1cab5
RV
4148 int ret;
4149
621d4c76 4150 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
4151 ret = intel_dp_sink_crc_stop(intel_dp);
4152 if (ret)
4153 return ret;
4154 }
082dcc7c
RV
4155
4156 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4157 return -EIO;
4158
4159 if (!(buf & DP_TEST_CRC_SUPPORTED))
4160 return -ENOTTY;
4161
621d4c76
RV
4162 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4163
082dcc7c
RV
4164 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4165 return -EIO;
4166
4167 hsw_disable_ips(intel_crtc);
1dda5f93 4168
9d1a1031 4169 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4170 buf | DP_TEST_SINK_START) < 0) {
4171 hsw_enable_ips(intel_crtc);
4172 return -EIO;
4373f0f2
PZ
4173 }
4174
621d4c76 4175 intel_dp->sink_crc.started = true;
082dcc7c
RV
4176 return 0;
4177}
4178
4179int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4180{
4181 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4182 struct drm_device *dev = dig_port->base.base.dev;
4183 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4184 u8 buf;
621d4c76 4185 int count, ret;
082dcc7c 4186 int attempts = 6;
aabc95dc 4187 bool old_equal_new;
082dcc7c
RV
4188
4189 ret = intel_dp_sink_crc_start(intel_dp);
4190 if (ret)
4191 return ret;
4192
ad9dc91b 4193 do {
621d4c76
RV
4194 intel_wait_for_vblank(dev, intel_crtc->pipe);
4195
1dda5f93 4196 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4197 DP_TEST_SINK_MISC, &buf) < 0) {
4198 ret = -EIO;
afe0d67e 4199 goto stop;
4373f0f2 4200 }
621d4c76 4201 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4202
621d4c76
RV
4203 /*
4204 * Count might be reset during the loop. In this case
4205 * last known count needs to be reset as well.
4206 */
4207 if (count == 0)
4208 intel_dp->sink_crc.last_count = 0;
4209
4210 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4211 ret = -EIO;
4212 goto stop;
4213 }
aabc95dc
RV
4214
4215 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4216 !memcmp(intel_dp->sink_crc.last_crc, crc,
4217 6 * sizeof(u8)));
4218
4219 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
4220
4221 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4222 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
4223
4224 if (attempts == 0) {
aabc95dc
RV
4225 if (old_equal_new) {
4226 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4227 } else {
4228 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4229 ret = -ETIMEDOUT;
4230 goto stop;
4231 }
ad9dc91b 4232 }
d2e216d0 4233
afe0d67e 4234stop:
082dcc7c 4235 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4236 return ret;
d2e216d0
RV
4237}
4238
a60f0e38
JB
4239static bool
4240intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4241{
9d1a1031
JN
4242 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4243 DP_DEVICE_SERVICE_IRQ_VECTOR,
4244 sink_irq_vector, 1) == 1;
a60f0e38
JB
4245}
4246
0e32b39c
DA
4247static bool
4248intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4249{
4250 int ret;
4251
4252 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4253 DP_SINK_COUNT_ESI,
4254 sink_irq_vector, 14);
4255 if (ret != 14)
4256 return false;
4257
4258 return true;
4259}
4260
c5d5ab7a
TP
4261static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4262{
4263 uint8_t test_result = DP_TEST_ACK;
4264 return test_result;
4265}
4266
4267static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4268{
4269 uint8_t test_result = DP_TEST_NAK;
4270 return test_result;
4271}
4272
4273static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4274{
c5d5ab7a 4275 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4276 struct intel_connector *intel_connector = intel_dp->attached_connector;
4277 struct drm_connector *connector = &intel_connector->base;
4278
4279 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4280 connector->edid_corrupt ||
559be30c
TP
4281 intel_dp->aux.i2c_defer_count > 6) {
4282 /* Check EDID read for NACKs, DEFERs and corruption
4283 * (DP CTS 1.2 Core r1.1)
4284 * 4.2.2.4 : Failed EDID read, I2C_NAK
4285 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4286 * 4.2.2.6 : EDID corruption detected
4287 * Use failsafe mode for all cases
4288 */
4289 if (intel_dp->aux.i2c_nack_count > 0 ||
4290 intel_dp->aux.i2c_defer_count > 0)
4291 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4292 intel_dp->aux.i2c_nack_count,
4293 intel_dp->aux.i2c_defer_count);
4294 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4295 } else {
f79b468e
TS
4296 struct edid *block = intel_connector->detect_edid;
4297
4298 /* We have to write the checksum
4299 * of the last block read
4300 */
4301 block += intel_connector->detect_edid->extensions;
4302
559be30c
TP
4303 if (!drm_dp_dpcd_write(&intel_dp->aux,
4304 DP_TEST_EDID_CHECKSUM,
f79b468e 4305 &block->checksum,
5a1cc655 4306 1))
559be30c
TP
4307 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4308
4309 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4310 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4311 }
4312
4313 /* Set test active flag here so userspace doesn't interrupt things */
4314 intel_dp->compliance_test_active = 1;
4315
c5d5ab7a
TP
4316 return test_result;
4317}
4318
4319static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4320{
c5d5ab7a
TP
4321 uint8_t test_result = DP_TEST_NAK;
4322 return test_result;
4323}
4324
4325static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4326{
4327 uint8_t response = DP_TEST_NAK;
4328 uint8_t rxdata = 0;
4329 int status = 0;
4330
559be30c 4331 intel_dp->compliance_test_active = 0;
c5d5ab7a 4332 intel_dp->compliance_test_type = 0;
559be30c
TP
4333 intel_dp->compliance_test_data = 0;
4334
c5d5ab7a
TP
4335 intel_dp->aux.i2c_nack_count = 0;
4336 intel_dp->aux.i2c_defer_count = 0;
4337
4338 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4339 if (status <= 0) {
4340 DRM_DEBUG_KMS("Could not read test request from sink\n");
4341 goto update_status;
4342 }
4343
4344 switch (rxdata) {
4345 case DP_TEST_LINK_TRAINING:
4346 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4347 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4348 response = intel_dp_autotest_link_training(intel_dp);
4349 break;
4350 case DP_TEST_LINK_VIDEO_PATTERN:
4351 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4352 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4353 response = intel_dp_autotest_video_pattern(intel_dp);
4354 break;
4355 case DP_TEST_LINK_EDID_READ:
4356 DRM_DEBUG_KMS("EDID test requested\n");
4357 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4358 response = intel_dp_autotest_edid(intel_dp);
4359 break;
4360 case DP_TEST_LINK_PHY_TEST_PATTERN:
4361 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4362 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4363 response = intel_dp_autotest_phy_pattern(intel_dp);
4364 break;
4365 default:
4366 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4367 break;
4368 }
4369
4370update_status:
4371 status = drm_dp_dpcd_write(&intel_dp->aux,
4372 DP_TEST_RESPONSE,
4373 &response, 1);
4374 if (status <= 0)
4375 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4376}
4377
0e32b39c
DA
4378static int
4379intel_dp_check_mst_status(struct intel_dp *intel_dp)
4380{
4381 bool bret;
4382
4383 if (intel_dp->is_mst) {
4384 u8 esi[16] = { 0 };
4385 int ret = 0;
4386 int retry;
4387 bool handled;
4388 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4389go_again:
4390 if (bret == true) {
4391
4392 /* check link status - esi[10] = 0x200c */
90a6b7b0 4393 if (intel_dp->active_mst_links &&
901c2daf 4394 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4395 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4396 intel_dp_start_link_train(intel_dp);
4397 intel_dp_complete_link_train(intel_dp);
4398 intel_dp_stop_link_train(intel_dp);
4399 }
4400
6f34cc39 4401 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4402 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4403
4404 if (handled) {
4405 for (retry = 0; retry < 3; retry++) {
4406 int wret;
4407 wret = drm_dp_dpcd_write(&intel_dp->aux,
4408 DP_SINK_COUNT_ESI+1,
4409 &esi[1], 3);
4410 if (wret == 3) {
4411 break;
4412 }
4413 }
4414
4415 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4416 if (bret == true) {
6f34cc39 4417 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4418 goto go_again;
4419 }
4420 } else
4421 ret = 0;
4422
4423 return ret;
4424 } else {
4425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4426 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4427 intel_dp->is_mst = false;
4428 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4429 /* send a hotplug event */
4430 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4431 }
4432 }
4433 return -EINVAL;
4434}
4435
a4fc5ed6
KP
4436/*
4437 * According to DP spec
4438 * 5.1.2:
4439 * 1. Read DPCD
4440 * 2. Configure link according to Receiver Capabilities
4441 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4442 * 4. Check link status on receipt of hot-plug interrupt
4443 */
a5146200 4444static void
ea5b213a 4445intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4446{
5b215bcf 4447 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4448 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4449 u8 sink_irq_vector;
93f62dad 4450 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4451
5b215bcf
DA
4452 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4453
e02f9a06 4454 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4455 return;
4456
1a125d8a
ID
4457 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4458 return;
4459
92fd8fd1 4460 /* Try to read receiver status if the link appears to be up */
93f62dad 4461 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4462 return;
4463 }
4464
92fd8fd1 4465 /* Now read the DPCD to see if it's actually running */
26d61aad 4466 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4467 return;
4468 }
4469
a60f0e38
JB
4470 /* Try to read the source of the interrupt */
4471 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4472 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4473 /* Clear interrupt source */
9d1a1031
JN
4474 drm_dp_dpcd_writeb(&intel_dp->aux,
4475 DP_DEVICE_SERVICE_IRQ_VECTOR,
4476 sink_irq_vector);
a60f0e38
JB
4477
4478 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4479 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4480 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4481 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4482 }
4483
901c2daf 4484 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4485 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4486 intel_encoder->base.name);
33a34e4e
JB
4487 intel_dp_start_link_train(intel_dp);
4488 intel_dp_complete_link_train(intel_dp);
3ab9c637 4489 intel_dp_stop_link_train(intel_dp);
33a34e4e 4490 }
a4fc5ed6 4491}
a4fc5ed6 4492
caf9ab24 4493/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4494static enum drm_connector_status
26d61aad 4495intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4496{
caf9ab24 4497 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4498 uint8_t type;
4499
4500 if (!intel_dp_get_dpcd(intel_dp))
4501 return connector_status_disconnected;
4502
4503 /* if there's no downstream port, we're done */
4504 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4505 return connector_status_connected;
caf9ab24
AJ
4506
4507 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4508 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4509 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4510 uint8_t reg;
9d1a1031
JN
4511
4512 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4513 &reg, 1) < 0)
caf9ab24 4514 return connector_status_unknown;
9d1a1031 4515
23235177
AJ
4516 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4517 : connector_status_disconnected;
caf9ab24
AJ
4518 }
4519
4520 /* If no HPD, poke DDC gently */
0b99836f 4521 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4522 return connector_status_connected;
caf9ab24
AJ
4523
4524 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4525 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4526 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4527 if (type == DP_DS_PORT_TYPE_VGA ||
4528 type == DP_DS_PORT_TYPE_NON_EDID)
4529 return connector_status_unknown;
4530 } else {
4531 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4532 DP_DWN_STRM_PORT_TYPE_MASK;
4533 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4534 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4535 return connector_status_unknown;
4536 }
caf9ab24
AJ
4537
4538 /* Anything else is out of spec, warn and ignore */
4539 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4540 return connector_status_disconnected;
71ba9000
AJ
4541}
4542
d410b56d
CW
4543static enum drm_connector_status
4544edp_detect(struct intel_dp *intel_dp)
4545{
4546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4547 enum drm_connector_status status;
4548
4549 status = intel_panel_detect(dev);
4550 if (status == connector_status_unknown)
4551 status = connector_status_connected;
4552
4553 return status;
4554}
4555
b93433cc
JN
4556static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4557 struct intel_digital_port *port)
5eb08b69 4558{
b93433cc 4559 u32 bit;
01cb9ea6 4560
0df53b77
JN
4561 switch (port->port) {
4562 case PORT_A:
4563 return true;
4564 case PORT_B:
4565 bit = SDE_PORTB_HOTPLUG;
4566 break;
4567 case PORT_C:
4568 bit = SDE_PORTC_HOTPLUG;
4569 break;
4570 case PORT_D:
4571 bit = SDE_PORTD_HOTPLUG;
4572 break;
4573 default:
4574 MISSING_CASE(port->port);
4575 return false;
4576 }
4577
4578 return I915_READ(SDEISR) & bit;
4579}
4580
4581static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4582 struct intel_digital_port *port)
4583{
4584 u32 bit;
4585
4586 switch (port->port) {
4587 case PORT_A:
4588 return true;
4589 case PORT_B:
4590 bit = SDE_PORTB_HOTPLUG_CPT;
4591 break;
4592 case PORT_C:
4593 bit = SDE_PORTC_HOTPLUG_CPT;
4594 break;
4595 case PORT_D:
4596 bit = SDE_PORTD_HOTPLUG_CPT;
4597 break;
4598 default:
4599 MISSING_CASE(port->port);
4600 return false;
b93433cc 4601 }
1b469639 4602
b93433cc 4603 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4604}
4605
7e66bcf2 4606static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4607 struct intel_digital_port *port)
a4fc5ed6 4608{
9642c81c 4609 u32 bit;
5eb08b69 4610
9642c81c
JN
4611 switch (port->port) {
4612 case PORT_B:
4613 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4614 break;
4615 case PORT_C:
4616 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4617 break;
4618 case PORT_D:
4619 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4620 break;
4621 default:
4622 MISSING_CASE(port->port);
4623 return false;
4624 }
4625
4626 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4627}
4628
4629static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4630 struct intel_digital_port *port)
4631{
4632 u32 bit;
4633
4634 switch (port->port) {
4635 case PORT_B:
4636 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4637 break;
4638 case PORT_C:
4639 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4640 break;
4641 case PORT_D:
4642 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4643 break;
4644 default:
4645 MISSING_CASE(port->port);
4646 return false;
a4fc5ed6
KP
4647 }
4648
1d245987 4649 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4650}
4651
e464bfde
JN
4652static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4653 struct intel_digital_port *port)
4654{
4655 u32 bit;
4656
4657 switch (port->port) {
4658 case PORT_A:
4659 bit = BXT_DE_PORT_HP_DDIA;
4660 break;
4661 case PORT_B:
4662 bit = BXT_DE_PORT_HP_DDIB;
4663 break;
4664 case PORT_C:
4665 bit = BXT_DE_PORT_HP_DDIC;
4666 break;
4667 default:
4668 MISSING_CASE(port->port);
4669 return false;
4670 }
4671
4672 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4673}
4674
7e66bcf2
JN
4675/*
4676 * intel_digital_port_connected - is the specified port connected?
4677 * @dev_priv: i915 private structure
4678 * @port: the port to test
4679 *
4680 * Return %true if @port is connected, %false otherwise.
4681 */
4682static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4683 struct intel_digital_port *port)
4684{
0df53b77 4685 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4686 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4687 if (HAS_PCH_SPLIT(dev_priv))
4688 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4689 else if (IS_BROXTON(dev_priv))
4690 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4691 else if (IS_VALLEYVIEW(dev_priv))
4692 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4693 else
4694 return g4x_digital_port_connected(dev_priv, port);
4695}
4696
b93433cc
JN
4697static enum drm_connector_status
4698ironlake_dp_detect(struct intel_dp *intel_dp)
4699{
4700 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4701 struct drm_i915_private *dev_priv = dev->dev_private;
4702 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4703
7e66bcf2 4704 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4705 return connector_status_disconnected;
4706
4707 return intel_dp_detect_dpcd(intel_dp);
4708}
4709
2a592bec
DA
4710static enum drm_connector_status
4711g4x_dp_detect(struct intel_dp *intel_dp)
4712{
4713 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4714 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4715
4716 /* Can't disconnect eDP, but you can close the lid... */
4717 if (is_edp(intel_dp)) {
4718 enum drm_connector_status status;
4719
4720 status = intel_panel_detect(dev);
4721 if (status == connector_status_unknown)
4722 status = connector_status_connected;
4723 return status;
4724 }
4725
7e66bcf2 4726 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4727 return connector_status_disconnected;
4728
26d61aad 4729 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4730}
4731
8c241fef 4732static struct edid *
beb60608 4733intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4734{
beb60608 4735 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4736
9cd300e0
JN
4737 /* use cached edid if we have one */
4738 if (intel_connector->edid) {
9cd300e0
JN
4739 /* invalid edid */
4740 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4741 return NULL;
4742
55e9edeb 4743 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4744 } else
4745 return drm_get_edid(&intel_connector->base,
4746 &intel_dp->aux.ddc);
4747}
8c241fef 4748
beb60608
CW
4749static void
4750intel_dp_set_edid(struct intel_dp *intel_dp)
4751{
4752 struct intel_connector *intel_connector = intel_dp->attached_connector;
4753 struct edid *edid;
8c241fef 4754
beb60608
CW
4755 edid = intel_dp_get_edid(intel_dp);
4756 intel_connector->detect_edid = edid;
4757
4758 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4759 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4760 else
4761 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4762}
4763
beb60608
CW
4764static void
4765intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4766{
beb60608 4767 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4768
beb60608
CW
4769 kfree(intel_connector->detect_edid);
4770 intel_connector->detect_edid = NULL;
9cd300e0 4771
beb60608
CW
4772 intel_dp->has_audio = false;
4773}
d6f24d0f 4774
beb60608
CW
4775static enum intel_display_power_domain
4776intel_dp_power_get(struct intel_dp *dp)
4777{
4778 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4779 enum intel_display_power_domain power_domain;
4780
4781 power_domain = intel_display_port_power_domain(encoder);
4782 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4783
4784 return power_domain;
4785}
d6f24d0f 4786
beb60608
CW
4787static void
4788intel_dp_power_put(struct intel_dp *dp,
4789 enum intel_display_power_domain power_domain)
4790{
4791 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4792 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4793}
4794
a9756bb5
ZW
4795static enum drm_connector_status
4796intel_dp_detect(struct drm_connector *connector, bool force)
4797{
4798 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4799 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4800 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4801 struct drm_device *dev = connector->dev;
a9756bb5 4802 enum drm_connector_status status;
671dedd2 4803 enum intel_display_power_domain power_domain;
0e32b39c 4804 bool ret;
09b1eb13 4805 u8 sink_irq_vector;
a9756bb5 4806
164c8598 4807 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4808 connector->base.id, connector->name);
beb60608 4809 intel_dp_unset_edid(intel_dp);
164c8598 4810
0e32b39c
DA
4811 if (intel_dp->is_mst) {
4812 /* MST devices are disconnected from a monitor POV */
4813 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4814 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4815 return connector_status_disconnected;
0e32b39c
DA
4816 }
4817
beb60608 4818 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4819
d410b56d
CW
4820 /* Can't disconnect eDP, but you can close the lid... */
4821 if (is_edp(intel_dp))
4822 status = edp_detect(intel_dp);
4823 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4824 status = ironlake_dp_detect(intel_dp);
4825 else
4826 status = g4x_dp_detect(intel_dp);
4827 if (status != connector_status_connected)
c8c8fb33 4828 goto out;
a9756bb5 4829
0d198328
AJ
4830 intel_dp_probe_oui(intel_dp);
4831
0e32b39c
DA
4832 ret = intel_dp_probe_mst(intel_dp);
4833 if (ret) {
4834 /* if we are in MST mode then this connector
4835  * won't appear connected or have anything with EDID on it */
4836 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4837 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4838 status = connector_status_disconnected;
4839 goto out;
4840 }
4841
beb60608 4842 intel_dp_set_edid(intel_dp);
a9756bb5 4843
d63885da
PZ
4844 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4845 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4846 status = connector_status_connected;
4847
09b1eb13
TP
4848 /* Try to read the source of the interrupt */
4849 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4850 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4851 /* Clear interrupt source */
4852 drm_dp_dpcd_writeb(&intel_dp->aux,
4853 DP_DEVICE_SERVICE_IRQ_VECTOR,
4854 sink_irq_vector);
4855
4856 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4857 intel_dp_handle_test_request(intel_dp);
4858 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4859 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4860 }
4861
c8c8fb33 4862out:
beb60608 4863 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4864 return status;
a4fc5ed6
KP
4865}
4866
beb60608
CW
4867static void
4868intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4869{
df0e9248 4870 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4871 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4872 enum intel_display_power_domain power_domain;
a4fc5ed6 4873
beb60608
CW
4874 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4875 connector->base.id, connector->name);
4876 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4877
beb60608
CW
4878 if (connector->status != connector_status_connected)
4879 return;
671dedd2 4880
beb60608
CW
4881 power_domain = intel_dp_power_get(intel_dp);
4882
4883 intel_dp_set_edid(intel_dp);
4884
4885 intel_dp_power_put(intel_dp, power_domain);
4886
4887 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4888 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4889}
4890
4891static int intel_dp_get_modes(struct drm_connector *connector)
4892{
4893 struct intel_connector *intel_connector = to_intel_connector(connector);
4894 struct edid *edid;
4895
4896 edid = intel_connector->detect_edid;
4897 if (edid) {
4898 int ret = intel_connector_update_modes(connector, edid);
4899 if (ret)
4900 return ret;
4901 }
32f9d658 4902
f8779fda 4903 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4904 if (is_edp(intel_attached_dp(connector)) &&
4905 intel_connector->panel.fixed_mode) {
f8779fda 4906 struct drm_display_mode *mode;
beb60608
CW
4907
4908 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4909 intel_connector->panel.fixed_mode);
f8779fda 4910 if (mode) {
32f9d658
ZW
4911 drm_mode_probed_add(connector, mode);
4912 return 1;
4913 }
4914 }
beb60608 4915
32f9d658 4916 return 0;
a4fc5ed6
KP
4917}
4918
1aad7ac0
CW
4919static bool
4920intel_dp_detect_audio(struct drm_connector *connector)
4921{
1aad7ac0 4922 bool has_audio = false;
beb60608 4923 struct edid *edid;
1aad7ac0 4924
beb60608
CW
4925 edid = to_intel_connector(connector)->detect_edid;
4926 if (edid)
1aad7ac0 4927 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4928
1aad7ac0
CW
4929 return has_audio;
4930}
4931
f684960e
CW
4932static int
4933intel_dp_set_property(struct drm_connector *connector,
4934 struct drm_property *property,
4935 uint64_t val)
4936{
e953fd7b 4937 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4938 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4939 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4940 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4941 int ret;
4942
662595df 4943 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4944 if (ret)
4945 return ret;
4946
3f43c48d 4947 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4948 int i = val;
4949 bool has_audio;
4950
4951 if (i == intel_dp->force_audio)
f684960e
CW
4952 return 0;
4953
1aad7ac0 4954 intel_dp->force_audio = i;
f684960e 4955
c3e5f67b 4956 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4957 has_audio = intel_dp_detect_audio(connector);
4958 else
c3e5f67b 4959 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4960
4961 if (has_audio == intel_dp->has_audio)
f684960e
CW
4962 return 0;
4963
1aad7ac0 4964 intel_dp->has_audio = has_audio;
f684960e
CW
4965 goto done;
4966 }
4967
e953fd7b 4968 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4969 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4970 bool old_range = intel_dp->limited_color_range;
ae4edb80 4971
55bc60db
VS
4972 switch (val) {
4973 case INTEL_BROADCAST_RGB_AUTO:
4974 intel_dp->color_range_auto = true;
4975 break;
4976 case INTEL_BROADCAST_RGB_FULL:
4977 intel_dp->color_range_auto = false;
0f2a2a75 4978 intel_dp->limited_color_range = false;
55bc60db
VS
4979 break;
4980 case INTEL_BROADCAST_RGB_LIMITED:
4981 intel_dp->color_range_auto = false;
0f2a2a75 4982 intel_dp->limited_color_range = true;
55bc60db
VS
4983 break;
4984 default:
4985 return -EINVAL;
4986 }
ae4edb80
DV
4987
4988 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4989 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4990 return 0;
4991
e953fd7b
CW
4992 goto done;
4993 }
4994
53b41837
YN
4995 if (is_edp(intel_dp) &&
4996 property == connector->dev->mode_config.scaling_mode_property) {
4997 if (val == DRM_MODE_SCALE_NONE) {
4998 DRM_DEBUG_KMS("no scaling not supported\n");
4999 return -EINVAL;
5000 }
5001
5002 if (intel_connector->panel.fitting_mode == val) {
5003 /* the eDP scaling property is not changed */
5004 return 0;
5005 }
5006 intel_connector->panel.fitting_mode = val;
5007
5008 goto done;
5009 }
5010
f684960e
CW
5011 return -EINVAL;
5012
5013done:
c0c36b94
CW
5014 if (intel_encoder->base.crtc)
5015 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
5016
5017 return 0;
5018}
5019
a4fc5ed6 5020static void
73845adf 5021intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 5022{
1d508706 5023 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 5024
10e972d3 5025 kfree(intel_connector->detect_edid);
beb60608 5026
9cd300e0
JN
5027 if (!IS_ERR_OR_NULL(intel_connector->edid))
5028 kfree(intel_connector->edid);
5029
acd8db10
PZ
5030 /* Can't call is_edp() since the encoder may have been destroyed
5031 * already. */
5032 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 5033 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 5034
a4fc5ed6 5035 drm_connector_cleanup(connector);
55f78c43 5036 kfree(connector);
a4fc5ed6
KP
5037}
5038
00c09d70 5039void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 5040{
da63a9f2
PZ
5041 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5042 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 5043
4f71d0cb 5044 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 5045 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
5046 if (is_edp(intel_dp)) {
5047 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5048 /*
5049 * vdd might still be enabled due to the delayed vdd off.
5050 * Make sure vdd is actually turned off here.
5051 */
773538e8 5052 pps_lock(intel_dp);
4be73780 5053 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
5054 pps_unlock(intel_dp);
5055
01527b31
CT
5056 if (intel_dp->edp_notifier.notifier_call) {
5057 unregister_reboot_notifier(&intel_dp->edp_notifier);
5058 intel_dp->edp_notifier.notifier_call = NULL;
5059 }
bd943159 5060 }
c8bd0e49 5061 drm_encoder_cleanup(encoder);
da63a9f2 5062 kfree(intel_dig_port);
24d05927
DV
5063}
5064
07f9cd0b
ID
5065static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5066{
5067 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5068
5069 if (!is_edp(intel_dp))
5070 return;
5071
951468f3
VS
5072 /*
5073 * vdd might still be enabled due to the delayed vdd off.
5074 * Make sure vdd is actually turned off here.
5075 */
afa4e53a 5076 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 5077 pps_lock(intel_dp);
07f9cd0b 5078 edp_panel_vdd_off_sync(intel_dp);
773538e8 5079 pps_unlock(intel_dp);
07f9cd0b
ID
5080}
5081
49e6bc51
VS
5082static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5083{
5084 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5085 struct drm_device *dev = intel_dig_port->base.base.dev;
5086 struct drm_i915_private *dev_priv = dev->dev_private;
5087 enum intel_display_power_domain power_domain;
5088
5089 lockdep_assert_held(&dev_priv->pps_mutex);
5090
5091 if (!edp_have_panel_vdd(intel_dp))
5092 return;
5093
5094 /*
5095 * The VDD bit needs a power domain reference, so if the bit is
5096 * already enabled when we boot or resume, grab this reference and
5097 * schedule a vdd off, so we don't hold on to the reference
5098 * indefinitely.
5099 */
5100 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5101 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5102 intel_display_power_get(dev_priv, power_domain);
5103
5104 edp_panel_vdd_schedule_off(intel_dp);
5105}
5106
6d93c0c4
ID
5107static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5108{
49e6bc51
VS
5109 struct intel_dp *intel_dp;
5110
5111 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5112 return;
5113
5114 intel_dp = enc_to_intel_dp(encoder);
5115
5116 pps_lock(intel_dp);
5117
5118 /*
5119 * Read out the current power sequencer assignment,
5120 * in case the BIOS did something with it.
5121 */
5122 if (IS_VALLEYVIEW(encoder->dev))
5123 vlv_initial_power_sequencer_setup(intel_dp);
5124
5125 intel_edp_panel_vdd_sanitize(intel_dp);
5126
5127 pps_unlock(intel_dp);
6d93c0c4
ID
5128}
5129
a4fc5ed6 5130static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 5131 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 5132 .detect = intel_dp_detect,
beb60608 5133 .force = intel_dp_force,
a4fc5ed6 5134 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 5135 .set_property = intel_dp_set_property,
2545e4a6 5136 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 5137 .destroy = intel_dp_connector_destroy,
c6f95f27 5138 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 5139 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
5140};
5141
5142static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5143 .get_modes = intel_dp_get_modes,
5144 .mode_valid = intel_dp_mode_valid,
df0e9248 5145 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
5146};
5147
a4fc5ed6 5148static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 5149 .reset = intel_dp_encoder_reset,
24d05927 5150 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
5151};
5152
b2c5c181 5153enum irqreturn
13cf5504
DA
5154intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5155{
5156 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 5157 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
5158 struct drm_device *dev = intel_dig_port->base.base.dev;
5159 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 5160 enum intel_display_power_domain power_domain;
b2c5c181 5161 enum irqreturn ret = IRQ_NONE;
1c767b33 5162
0e32b39c
DA
5163 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5164 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5165
7a7f84cc
VS
5166 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5167 /*
5168 * vdd off can generate a long pulse on eDP which
5169 * would require vdd on to handle it, and thus we
5170 * would end up in an endless cycle of
5171 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5172 */
5173 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5174 port_name(intel_dig_port->port));
a8b3d52f 5175 return IRQ_HANDLED;
7a7f84cc
VS
5176 }
5177
26fbb774
VS
5178 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5179 port_name(intel_dig_port->port),
0e32b39c 5180 long_hpd ? "long" : "short");
13cf5504 5181
1c767b33
ID
5182 power_domain = intel_display_port_power_domain(intel_encoder);
5183 intel_display_power_get(dev_priv, power_domain);
5184
0e32b39c 5185 if (long_hpd) {
5fa836a9
MK
5186 /* indicate that we need to restart link training */
5187 intel_dp->train_set_valid = false;
2a592bec 5188
7e66bcf2
JN
5189 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5190 goto mst_fail;
0e32b39c
DA
5191
5192 if (!intel_dp_get_dpcd(intel_dp)) {
5193 goto mst_fail;
5194 }
5195
5196 intel_dp_probe_oui(intel_dp);
5197
5198 if (!intel_dp_probe_mst(intel_dp))
5199 goto mst_fail;
5200
5201 } else {
5202 if (intel_dp->is_mst) {
1c767b33 5203 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5204 goto mst_fail;
5205 }
5206
5207 if (!intel_dp->is_mst) {
5208 /*
5209 * we'll check the link status via the normal hot plug path later -
5210 * but for short hpds we should check it now
5211 */
5b215bcf 5212 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5213 intel_dp_check_link_status(intel_dp);
5b215bcf 5214 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5215 }
5216 }
b2c5c181
DV
5217
5218 ret = IRQ_HANDLED;
5219
1c767b33 5220 goto put_power;
0e32b39c
DA
5221mst_fail:
5222 /* if we were in MST mode, and the device is no longer there, get out of MST mode */
5223 if (intel_dp->is_mst) {
5224 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5225 intel_dp->is_mst = false;
5226 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5227 }
1c767b33
ID
5228put_power:
5229 intel_display_power_put(dev_priv, power_domain);
5230
5231 return ret;
13cf5504
DA
5232}
5233
e3421a18
ZW
5234/* Return which DP Port should be selected for Transcoder DP control */
5235int
0206e353 5236intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5237{
5238 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5239 struct intel_encoder *intel_encoder;
5240 struct intel_dp *intel_dp;
e3421a18 5241
fa90ecef
PZ
5242 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5243 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5244
fa90ecef
PZ
5245 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5246 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5247 return intel_dp->output_reg;
e3421a18 5248 }
ea5b213a 5249
e3421a18
ZW
5250 return -1;
5251}
5252
36e83a18 5253/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5254bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5255{
5256 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5257 union child_device_config *p_child;
36e83a18 5258 int i;
5d8a7752
VS
5259 static const short port_mapping[] = {
5260 [PORT_B] = PORT_IDPB,
5261 [PORT_C] = PORT_IDPC,
5262 [PORT_D] = PORT_IDPD,
5263 };
36e83a18 5264
3b32a35b
VS
5265 if (port == PORT_A)
5266 return true;
5267
41aa3448 5268 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5269 return false;
5270
41aa3448
RV
5271 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5272 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5273
5d8a7752 5274 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5275 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5276 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5277 return true;
5278 }
5279 return false;
5280}
5281
0e32b39c 5282void
f684960e
CW
5283intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5284{
53b41837
YN
5285 struct intel_connector *intel_connector = to_intel_connector(connector);
5286
3f43c48d 5287 intel_attach_force_audio_property(connector);
e953fd7b 5288 intel_attach_broadcast_rgb_property(connector);
55bc60db 5289 intel_dp->color_range_auto = true;
53b41837
YN
5290
5291 if (is_edp(intel_dp)) {
5292 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5293 drm_object_attach_property(
5294 &connector->base,
53b41837 5295 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5296 DRM_MODE_SCALE_ASPECT);
5297 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5298 }
f684960e
CW
5299}
5300
dada1a9f
ID
5301static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5302{
5303 intel_dp->last_power_cycle = jiffies;
5304 intel_dp->last_power_on = jiffies;
5305 intel_dp->last_backlight_off = jiffies;
5306}
5307
67a54566
DV
5308static void
5309intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5310 struct intel_dp *intel_dp)
67a54566
DV
5311{
5312 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5313 struct edp_power_seq cur, vbt, spec,
5314 *final = &intel_dp->pps_delays;
b0a08bec
VK
5315 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5316 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5317
e39b999a
VS
5318 lockdep_assert_held(&dev_priv->pps_mutex);
5319
81ddbc69
VS
5320 /* already initialized? */
5321 if (final->t11_t12 != 0)
5322 return;
5323
b0a08bec
VK
5324 if (IS_BROXTON(dev)) {
5325 /*
5326 * TODO: BXT has 2 sets of PPS registers.
5327 * The correct register for Broxton needs to be identified
5328 * using the VBT; hardcoding for now.
5329 */
5330 pp_ctrl_reg = BXT_PP_CONTROL(0);
5331 pp_on_reg = BXT_PP_ON_DELAYS(0);
5332 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5333 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5334 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5335 pp_on_reg = PCH_PP_ON_DELAYS;
5336 pp_off_reg = PCH_PP_OFF_DELAYS;
5337 pp_div_reg = PCH_PP_DIVISOR;
5338 } else {
bf13e81b
JN
5339 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5340
5341 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5342 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5343 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5344 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5345 }
67a54566
DV
5346
5347 /* Workaround: Need to write PP_CONTROL with the unlock key as
5348 * the very first thing. */
b0a08bec 5349 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5350
453c5420
JB
5351 pp_on = I915_READ(pp_on_reg);
5352 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5353 if (!IS_BROXTON(dev)) {
5354 I915_WRITE(pp_ctrl_reg, pp_ctl);
5355 pp_div = I915_READ(pp_div_reg);
5356 }
67a54566
DV
5357
5358 /* Pull timing values out of registers */
5359 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5360 PANEL_POWER_UP_DELAY_SHIFT;
5361
5362 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5363 PANEL_LIGHT_ON_DELAY_SHIFT;
5364
5365 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5366 PANEL_LIGHT_OFF_DELAY_SHIFT;
5367
5368 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5369 PANEL_POWER_DOWN_DELAY_SHIFT;
5370
b0a08bec
VK
5371 if (IS_BROXTON(dev)) {
5372 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5373 BXT_POWER_CYCLE_DELAY_SHIFT;
5374 if (tmp > 0)
5375 cur.t11_t12 = (tmp - 1) * 1000;
5376 else
5377 cur.t11_t12 = 0;
5378 } else {
5379 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5380 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5381 }
67a54566
DV
5382
5383 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5384 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5385
41aa3448 5386 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5387
5388 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5389 * our hw here, which are all in 100usec. */
5390 spec.t1_t3 = 210 * 10;
5391 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5392 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5393 spec.t10 = 500 * 10;
5394 /* This one is special and actually in units of 100ms, but zero
5395 * based in the hw (so we need to add 100 ms). But the sw vbt
5396 * table multiplies it by 1000 to make it in units of 100usec,
5397 * too. */
5398 spec.t11_t12 = (510 + 100) * 10;
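	/*
	 * Illustrative note (editor's, not from the spec text): the limit
	 * above evaluates to (510 + 100) * 10 = 6100 in 100 us units,
	 * i.e. 610 ms; get_delay() below divides by 10 to hand the driver
	 * the delays in ms.
	 */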
5399
5400 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5401 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5402
5403 /* Use the max of the register settings and vbt. If both are
5404 * unset, fall back to the spec limits. */
36b5f425 5405#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5406 spec.field : \
5407 max(cur.field, vbt.field))
5408 assign_final(t1_t3);
5409 assign_final(t8);
5410 assign_final(t9);
5411 assign_final(t10);
5412 assign_final(t11_t12);
5413#undef assign_final
5414
36b5f425 5415#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5416 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5417 intel_dp->backlight_on_delay = get_delay(t8);
5418 intel_dp->backlight_off_delay = get_delay(t9);
5419 intel_dp->panel_power_down_delay = get_delay(t10);
5420 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5421#undef get_delay
5422
f30d26e4
JN
5423 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5424 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5425 intel_dp->panel_power_cycle_delay);
5426
5427 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5428 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5429}
5430
5431static void
5432intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5433 struct intel_dp *intel_dp)
f30d26e4
JN
5434{
5435 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5436 u32 pp_on, pp_off, pp_div, port_sel = 0;
5437 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5438 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5439 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5440 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5441
e39b999a 5442 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5443
b0a08bec
VK
5444 if (IS_BROXTON(dev)) {
5445 /*
5446 * TODO: BXT has 2 sets of PPS registers.
5447 * The correct register for Broxton needs to be identified
5448 * using the VBT; hardcoding for now.
5449 */
5450 pp_ctrl_reg = BXT_PP_CONTROL(0);
5451 pp_on_reg = BXT_PP_ON_DELAYS(0);
5452 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5453
5454 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5455 pp_on_reg = PCH_PP_ON_DELAYS;
5456 pp_off_reg = PCH_PP_OFF_DELAYS;
5457 pp_div_reg = PCH_PP_DIVISOR;
5458 } else {
bf13e81b
JN
5459 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5460
5461 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5462 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5463 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5464 }
5465
b2f19d1a
PZ
5466 /*
5467 * And finally store the new values in the power sequencer. The
5468 * backlight delays are set to 1 because we do manual waits on them. For
5469 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5470 * we'll end up waiting for the backlight off delay twice: once when we
5471 * do the manual sleep, and once when we disable the panel and wait for
5472 * the PP_STATUS bit to become zero.
5473 */
f30d26e4 5474 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5475 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5476 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5477 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5478 /* Compute the divisor for the pp clock, simply match the Bspec
5479 * formula. */
b0a08bec
VK
5480 if (IS_BROXTON(dev)) {
5481 pp_div = I915_READ(pp_ctrl_reg);
5482 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5483 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5484 << BXT_POWER_CYCLE_DELAY_SHIFT);
5485 } else {
5486 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5487 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5488 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5489 }
67a54566
DV
5490
5491 /* Haswell doesn't have any port selection bits for the panel
5492 * power sequencer any more. */
bc7d38a4 5493 if (IS_VALLEYVIEW(dev)) {
ad933b56 5494 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5495 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5496 if (port == PORT_A)
a24c144c 5497 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5498 else
a24c144c 5499 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5500 }
5501
453c5420
JB
5502 pp_on |= port_sel;
5503
5504 I915_WRITE(pp_on_reg, pp_on);
5505 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5506 if (IS_BROXTON(dev))
5507 I915_WRITE(pp_ctrl_reg, pp_div);
5508 else
5509 I915_WRITE(pp_div_reg, pp_div);
67a54566 5510
67a54566 5511 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5512 I915_READ(pp_on_reg),
5513 I915_READ(pp_off_reg),
b0a08bec
VK
5514 IS_BROXTON(dev) ?
5515 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5516 I915_READ(pp_div_reg));
f684960e
CW
5517}
5518
b33a2815
VK
5519/**
5520 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5521 * @dev: DRM device
5522 * @refresh_rate: RR to be programmed
5523 *
5524 * This function gets called when refresh rate (RR) has to be changed from
5525 * one frequency to another. Switches can be between high and low RR
5526 * supported by the panel or to any other RR based on media playback (in
5527 * this case, RR value needs to be passed from user space).
5528 *
5529 * The caller of this function needs to hold dev_priv->drrs.mutex (a minimal call sketch follows the function body below).
5530 */
96178eeb 5531static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5532{
5533 struct drm_i915_private *dev_priv = dev->dev_private;
5534 struct intel_encoder *encoder;
96178eeb
VK
5535 struct intel_digital_port *dig_port = NULL;
5536 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5537 struct intel_crtc_state *config = NULL;
439d7ac0 5538 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5539 u32 reg, val;
96178eeb 5540 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5541
5542 if (refresh_rate <= 0) {
5543 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5544 return;
5545 }
5546
96178eeb
VK
5547 if (intel_dp == NULL) {
5548 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5549 return;
5550 }
5551
1fcc9d1c 5552 /*
e4d59f6b
RV
5553 * FIXME: This needs proper synchronization with psr state for some
5554 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5555 */
439d7ac0 5556
96178eeb
VK
5557 dig_port = dp_to_dig_port(intel_dp);
5558 encoder = &dig_port->base;
723f9aab 5559 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5560
5561 if (!intel_crtc) {
5562 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5563 return;
5564 }
5565
6e3c9717 5566 config = intel_crtc->config;
439d7ac0 5567
96178eeb 5568 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5569 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5570 return;
5571 }
5572
96178eeb
VK
5573 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5574 refresh_rate)
439d7ac0
PB
5575 index = DRRS_LOW_RR;
5576
96178eeb 5577 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5578 DRM_DEBUG_KMS(
5579 "DRRS requested for previously set RR...ignoring\n");
5580 return;
5581 }
5582
5583 if (!intel_crtc->active) {
5584 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5585 return;
5586 }
5587
44395bfe 5588 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5589 switch (index) {
5590 case DRRS_HIGH_RR:
5591 intel_dp_set_m_n(intel_crtc, M1_N1);
5592 break;
5593 case DRRS_LOW_RR:
5594 intel_dp_set_m_n(intel_crtc, M2_N2);
5595 break;
5596 case DRRS_MAX_RR:
5597 default:
5598 DRM_ERROR("Unsupported refreshrate type\n");
5599 }
5600 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5601 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5602 val = I915_READ(reg);
a4c30b1d 5603
439d7ac0 5604 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5605 if (IS_VALLEYVIEW(dev))
5606 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5607 else
5608 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5609 } else {
6fa7aec1
VK
5610 if (IS_VALLEYVIEW(dev))
5611 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5612 else
5613 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5614 }
5615 I915_WRITE(reg, val);
5616 }
5617
4e9ac947
VK
5618 dev_priv->drrs.refresh_rate_type = index;
5619
5620 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5621}
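
/*
 * Illustration only, not part of the driver: a minimal sketch of the locking
 * contract documented for intel_dp_set_drrs_state() above. The helper name is
 * hypothetical; real callers (e.g. the downclock work below) follow the same
 * pattern.
 */
static inline void example_drrs_set_low_rr(struct drm_i915_private *dev_priv,
					   struct intel_dp *intel_dp)
{
	mutex_lock(&dev_priv->drrs.mutex);
	/* switch to the panel's downclocked refresh rate */
	intel_dp_set_drrs_state(dev_priv->dev,
				intel_dp->attached_connector->panel.
				downclock_mode->vrefresh);
	mutex_unlock(&dev_priv->drrs.mutex);
}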
5622
b33a2815
VK
5623/**
5624 * intel_edp_drrs_enable - init drrs struct if supported
5625 * @intel_dp: DP struct
5626 *
5627 * Initializes frontbuffer_bits and drrs.dp
5628 */
c395578e
VK
5629void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5630{
5631 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5632 struct drm_i915_private *dev_priv = dev->dev_private;
5633 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5634 struct drm_crtc *crtc = dig_port->base.base.crtc;
5635 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5636
5637 if (!intel_crtc->config->has_drrs) {
5638 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5639 return;
5640 }
5641
5642 mutex_lock(&dev_priv->drrs.mutex);
5643 if (WARN_ON(dev_priv->drrs.dp)) {
5644 DRM_ERROR("DRRS already enabled\n");
5645 goto unlock;
5646 }
5647
5648 dev_priv->drrs.busy_frontbuffer_bits = 0;
5649
5650 dev_priv->drrs.dp = intel_dp;
5651
5652unlock:
5653 mutex_unlock(&dev_priv->drrs.mutex);
5654}
5655
b33a2815
VK
5656/**
5657 * intel_edp_drrs_disable - Disable DRRS
5658 * @intel_dp: DP struct
5659 *
5660 */
c395578e
VK
5661void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5662{
5663 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5664 struct drm_i915_private *dev_priv = dev->dev_private;
5665 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5666 struct drm_crtc *crtc = dig_port->base.base.crtc;
5667 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5668
5669 if (!intel_crtc->config->has_drrs)
5670 return;
5671
5672 mutex_lock(&dev_priv->drrs.mutex);
5673 if (!dev_priv->drrs.dp) {
5674 mutex_unlock(&dev_priv->drrs.mutex);
5675 return;
5676 }
5677
5678 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5679 intel_dp_set_drrs_state(dev_priv->dev,
5680 intel_dp->attached_connector->panel.
5681 fixed_mode->vrefresh);
5682
5683 dev_priv->drrs.dp = NULL;
5684 mutex_unlock(&dev_priv->drrs.mutex);
5685
5686 cancel_delayed_work_sync(&dev_priv->drrs.work);
5687}
5688
4e9ac947
VK
5689static void intel_edp_drrs_downclock_work(struct work_struct *work)
5690{
5691 struct drm_i915_private *dev_priv =
5692 container_of(work, typeof(*dev_priv), drrs.work.work);
5693 struct intel_dp *intel_dp;
5694
5695 mutex_lock(&dev_priv->drrs.mutex);
5696
5697 intel_dp = dev_priv->drrs.dp;
5698
5699 if (!intel_dp)
5700 goto unlock;
5701
439d7ac0 5702 /*
4e9ac947
VK
5703 * The delayed work can race with an invalidate, hence we need to
5704 * recheck.
439d7ac0
PB
5705 */
5706
4e9ac947
VK
5707 if (dev_priv->drrs.busy_frontbuffer_bits)
5708 goto unlock;
439d7ac0 5709
4e9ac947
VK
5710 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5711 intel_dp_set_drrs_state(dev_priv->dev,
5712 intel_dp->attached_connector->panel.
5713 downclock_mode->vrefresh);
439d7ac0 5714
4e9ac947 5715unlock:
4e9ac947 5716 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5717}
5718
b33a2815 5719/**
0ddfd203 5720 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5721 * @dev: DRM device
5722 * @frontbuffer_bits: frontbuffer plane tracking bits
5723 *
0ddfd203
R
5724 * This function gets called every time rendering on the given planes starts.
5725 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5726 *
5727 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5728 */
a93fad0f
VK
5729void intel_edp_drrs_invalidate(struct drm_device *dev,
5730 unsigned frontbuffer_bits)
5731{
5732 struct drm_i915_private *dev_priv = dev->dev_private;
5733 struct drm_crtc *crtc;
5734 enum pipe pipe;
5735
9da7d693 5736 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5737 return;
5738
88f933a8 5739 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5740
a93fad0f 5741 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5742 if (!dev_priv->drrs.dp) {
5743 mutex_unlock(&dev_priv->drrs.mutex);
5744 return;
5745 }
5746
a93fad0f
VK
5747 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5748 pipe = to_intel_crtc(crtc)->pipe;
5749
c1d038c6
DV
5750 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5751 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5752
0ddfd203 5753 /* invalidate means busy screen hence upclock */
c1d038c6 5754 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5755 intel_dp_set_drrs_state(dev_priv->dev,
5756 dev_priv->drrs.dp->attached_connector->panel.
5757 fixed_mode->vrefresh);
a93fad0f 5758
a93fad0f
VK
5759 mutex_unlock(&dev_priv->drrs.mutex);
5760}
5761
b33a2815 5762/**
0ddfd203 5763 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5764 * @dev: DRM device
5765 * @frontbuffer_bits: frontbuffer plane tracking bits
5766 *
0ddfd203
R
5767 * This function gets called every time rendering on the given planes has
5768 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5769 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5770 * if no other planes are dirty.
b33a2815
VK
5771 *
5772 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5773 */
a93fad0f
VK
5774void intel_edp_drrs_flush(struct drm_device *dev,
5775 unsigned frontbuffer_bits)
5776{
5777 struct drm_i915_private *dev_priv = dev->dev_private;
5778 struct drm_crtc *crtc;
5779 enum pipe pipe;
5780
9da7d693 5781 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5782 return;
5783
88f933a8 5784 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5785
a93fad0f 5786 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5787 if (!dev_priv->drrs.dp) {
5788 mutex_unlock(&dev_priv->drrs.mutex);
5789 return;
5790 }
5791
a93fad0f
VK
5792 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5793 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5794
5795 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5796 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5797
0ddfd203 5798 /* flush means busy screen hence upclock */
c1d038c6 5799 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5800 intel_dp_set_drrs_state(dev_priv->dev,
5801 dev_priv->drrs.dp->attached_connector->panel.
5802 fixed_mode->vrefresh);
5803
5804 /*
5805 * flush also means no more activity hence schedule downclock, if all
5806 * other fbs are quiescent too
5807 */
5808 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5809 schedule_delayed_work(&dev_priv->drrs.work,
5810 msecs_to_jiffies(1000));
5811 mutex_unlock(&dev_priv->drrs.mutex);
5812}
5813
b33a2815
VK
5814/**
5815 * DOC: Display Refresh Rate Switching (DRRS)
5816 *
5817 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5818 * which enables switching between low and high refresh rates,
5819 * dynamically, based on the usage scenario. This feature is applicable
5820 * for internal panels.
5821 *
5822 * Indication that the panel supports DRRS is given by the panel EDID, which
5823 * would list multiple refresh rates for one resolution.
5824 *
5825 * DRRS is of 2 types - static and seamless.
5826 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5827 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5828 * Seamless DRRS involves changing RR without any visual effect to the user
5829 * and can be used during normal system usage. This is done by programming
5830 * certain registers.
5831 *
5832 * Support for static/seamless DRRS may be indicated in the VBT based on
5833 * inputs from the panel spec.
5834 *
5835 * DRRS saves power by switching to low RR based on usage scenarios.
5836 *
5837 * eDP DRRS:-
5838 * The implementation is based on frontbuffer tracking.
5839 * When there is a disturbance on the screen triggered by user activity or a
5840 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5841 * When there is no movement on screen, after a timeout of 1 second, a switch
5842 * to low RR is made.
5843 * For integration with frontbuffer tracking code,
5844 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called
 * (a minimal caller sketch follows this comment block).
5845 *
5846 * DRRS can be further extended to support other internal panels and also
5847 * the scenario of video playback wherein RR is set based on the rate
5848 * requested by userspace.
5849 */
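
/*
 * Illustration only, not part of the driver: a minimal sketch of how a
 * frontbuffer tracking caller is expected to drive the Idleness DRRS hooks
 * above. The function name is hypothetical; the real callers live in the
 * frontbuffer tracking code.
 */
static inline void example_drrs_frontbuffer_usage(struct drm_device *dev,
						  enum pipe pipe)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);

	/* rendering starts: disable DRRS, i.e. force the high refresh rate */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering or a flip on the crtc completes ... */

	/* upclock if needed and re-arm the 1 second idleness timeout */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}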
5850
5851/**
5852 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5853 * @intel_connector: eDP connector
5854 * @fixed_mode: preferred mode of panel
5855 *
5856 * This function is called only once at driver load to initialize basic
5857 * DRRS stuff.
5858 *
5859 * Returns:
5860 * Downclock mode if panel supports it, else return NULL.
5861 * DRRS support is determined by the presence of downclock mode (apart
5862 * from VBT setting).
5863 */
4f9db5b5 5864static struct drm_display_mode *
96178eeb
VK
5865intel_dp_drrs_init(struct intel_connector *intel_connector,
5866 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5867{
5868 struct drm_connector *connector = &intel_connector->base;
96178eeb 5869 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5870 struct drm_i915_private *dev_priv = dev->dev_private;
5871 struct drm_display_mode *downclock_mode = NULL;
5872
9da7d693
DV
5873 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5874 mutex_init(&dev_priv->drrs.mutex);
5875
4f9db5b5
PB
5876 if (INTEL_INFO(dev)->gen <= 6) {
5877 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5878 return NULL;
5879 }
5880
5881 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5882 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5883 return NULL;
5884 }
5885
5886 downclock_mode = intel_find_panel_downclock
5887 (dev, fixed_mode, connector);
5888
5889 if (!downclock_mode) {
a1d26342 5890 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5891 return NULL;
5892 }
5893
96178eeb 5894 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5895
96178eeb 5896 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5897 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5898 return downclock_mode;
5899}
5900
ed92f0b2 5901static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5902 struct intel_connector *intel_connector)
ed92f0b2
PZ
5903{
5904 struct drm_connector *connector = &intel_connector->base;
5905 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5906 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5907 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5908 struct drm_i915_private *dev_priv = dev->dev_private;
5909 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5910 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5911 bool has_dpcd;
5912 struct drm_display_mode *scan;
5913 struct edid *edid;
6517d273 5914 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5915
5916 if (!is_edp(intel_dp))
5917 return true;
5918
49e6bc51
VS
5919 pps_lock(intel_dp);
5920 intel_edp_panel_vdd_sanitize(intel_dp);
5921 pps_unlock(intel_dp);
63635217 5922
ed92f0b2 5923 /* Cache DPCD and EDID for edp. */
ed92f0b2 5924 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5925
5926 if (has_dpcd) {
5927 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5928 dev_priv->no_aux_handshake =
5929 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5930 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5931 } else {
5932 /* if this fails, presume the device is a ghost */
5933 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5934 return false;
5935 }
5936
5937 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5938 pps_lock(intel_dp);
36b5f425 5939 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5940 pps_unlock(intel_dp);
ed92f0b2 5941
060c8778 5942 mutex_lock(&dev->mode_config.mutex);
0b99836f 5943 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5944 if (edid) {
5945 if (drm_add_edid_modes(connector, edid)) {
5946 drm_mode_connector_update_edid_property(connector,
5947 edid);
5948 drm_edid_to_eld(connector, edid);
5949 } else {
5950 kfree(edid);
5951 edid = ERR_PTR(-EINVAL);
5952 }
5953 } else {
5954 edid = ERR_PTR(-ENOENT);
5955 }
5956 intel_connector->edid = edid;
5957
5958 /* prefer fixed mode from EDID if available */
5959 list_for_each_entry(scan, &connector->probed_modes, head) {
5960 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5961 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5962 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5963 intel_connector, fixed_mode);
ed92f0b2
PZ
5964 break;
5965 }
5966 }
5967
5968 /* fallback to VBT if available for eDP */
5969 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5970 fixed_mode = drm_mode_duplicate(dev,
5971 dev_priv->vbt.lfp_lvds_vbt_mode);
5972 if (fixed_mode)
5973 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5974 }
060c8778 5975 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5976
01527b31
CT
5977 if (IS_VALLEYVIEW(dev)) {
5978 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5979 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5980
5981 /*
5982 * Figure out the current pipe for the initial backlight setup.
5983 * If the current pipe isn't valid, try the PPS pipe, and if that
5984 * fails just assume pipe A.
5985 */
5986 if (IS_CHERRYVIEW(dev))
5987 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5988 else
5989 pipe = PORT_TO_PIPE(intel_dp->DP);
5990
5991 if (pipe != PIPE_A && pipe != PIPE_B)
5992 pipe = intel_dp->pps_pipe;
5993
5994 if (pipe != PIPE_A && pipe != PIPE_B)
5995 pipe = PIPE_A;
5996
5997 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5998 pipe_name(pipe));
01527b31
CT
5999 }
6000
4f9db5b5 6001 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 6002 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 6003 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
6004
6005 return true;
6006}
6007
16c25533 6008bool
f0fec3f2
PZ
6009intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6010 struct intel_connector *intel_connector)
a4fc5ed6 6011{
f0fec3f2
PZ
6012 struct drm_connector *connector = &intel_connector->base;
6013 struct intel_dp *intel_dp = &intel_dig_port->dp;
6014 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6015 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 6016 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 6017 enum port port = intel_dig_port->port;
0b99836f 6018 int type;
a4fc5ed6 6019
a4a5d2f8
VS
6020 intel_dp->pps_pipe = INVALID_PIPE;
6021
ec5b01dd 6022 /* intel_dp vfuncs */
b6b5e383
DL
6023 if (INTEL_INFO(dev)->gen >= 9)
6024 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6025 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
6026 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6027 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6028 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6029 else if (HAS_PCH_SPLIT(dev))
6030 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6031 else
6032 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6033
b9ca5fad
DL
6034 if (INTEL_INFO(dev)->gen >= 9)
6035 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6036 else
6037 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 6038
0767935e
DV
6039 /* Preserve the current hw state. */
6040 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 6041 intel_dp->attached_connector = intel_connector;
3d3dc149 6042
3b32a35b 6043 if (intel_dp_is_edp(dev, port))
b329530c 6044 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
6045 else
6046 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 6047
f7d24902
ID
6048 /*
6049 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6050 * for DP the encoder type can be set by the caller to
6051 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6052 */
6053 if (type == DRM_MODE_CONNECTOR_eDP)
6054 intel_encoder->type = INTEL_OUTPUT_EDP;
6055
c17ed5b5
VS
6056 /* eDP only on port B and/or C on vlv/chv */
6057 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6058 port != PORT_B && port != PORT_C))
6059 return false;
6060
e7281eab
ID
6061 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6062 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6063 port_name(port));
6064
b329530c 6065 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
6066 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6067
a4fc5ed6
KP
6068 connector->interlace_allowed = true;
6069 connector->doublescan_allowed = 0;
6070
f0fec3f2 6071 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 6072 edp_panel_vdd_work);
a4fc5ed6 6073
df0e9248 6074 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 6075 drm_connector_register(connector);
a4fc5ed6 6076
affa9354 6077 if (HAS_DDI(dev))
bcbc889b
PZ
6078 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6079 else
6080 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 6081 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 6082
0b99836f 6083 /* Set up the hotplug pin. */
ab9d7c30
PZ
6084 switch (port) {
6085 case PORT_A:
1d843f9d 6086 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6087 break;
6088 case PORT_B:
1d843f9d 6089 intel_encoder->hpd_pin = HPD_PORT_B;
cf1d5883
SJ
6090 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6091 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6092 break;
6093 case PORT_C:
1d843f9d 6094 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
6095 break;
6096 case PORT_D:
1d843f9d 6097 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
6098 break;
6099 default:
ad1c0b19 6100 BUG();
5eb08b69
ZW
6101 }
6102
dada1a9f 6103 if (is_edp(intel_dp)) {
773538e8 6104 pps_lock(intel_dp);
1e74a324
VS
6105 intel_dp_init_panel_power_timestamps(intel_dp);
6106 if (IS_VALLEYVIEW(dev))
a4a5d2f8 6107 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 6108 else
36b5f425 6109 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 6110 pps_unlock(intel_dp);
dada1a9f 6111 }
0095e6dc 6112
9d1a1031 6113 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 6114
0e32b39c 6115 /* init MST on ports that can support it */
0c9b3715
JN
6116 if (HAS_DP_MST(dev) &&
6117 (port == PORT_B || port == PORT_C || port == PORT_D))
6118 intel_dp_mst_encoder_init(intel_dig_port,
6119 intel_connector->base.base.id);
0e32b39c 6120
36b5f425 6121 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 6122 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
6123 if (is_edp(intel_dp)) {
6124 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
6125 /*
6126 * vdd might still be enabled due to the delayed vdd off.
6127 * Make sure vdd is actually turned off here.
6128 */
773538e8 6129 pps_lock(intel_dp);
4be73780 6130 edp_panel_vdd_off_sync(intel_dp);
773538e8 6131 pps_unlock(intel_dp);
15b1d171 6132 }
34ea3d38 6133 drm_connector_unregister(connector);
b2f246a8 6134 drm_connector_cleanup(connector);
16c25533 6135 return false;
b2f246a8 6136 }
32f9d658 6137
f684960e
CW
6138 intel_dp_add_properties(intel_dp, connector);
6139
a4fc5ed6
KP
6140 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6141 * 0xd. Failure to do so will result in spurious interrupts being
6142 * generated on the port when a cable is not attached.
6143 */
6144 if (IS_G4X(dev) && !IS_GM45(dev)) {
6145 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6146 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6147 }
16c25533 6148
aa7471d2
JN
6149 i915_debugfs_connector_add(connector);
6150
16c25533 6151 return true;
a4fc5ed6 6152}
f0fec3f2
PZ
6153
6154void
6155intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6156{
13cf5504 6157 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6158 struct intel_digital_port *intel_dig_port;
6159 struct intel_encoder *intel_encoder;
6160 struct drm_encoder *encoder;
6161 struct intel_connector *intel_connector;
6162
b14c5679 6163 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6164 if (!intel_dig_port)
6165 return;
6166
08d9bc92 6167 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
6168 if (!intel_connector) {
6169 kfree(intel_dig_port);
6170 return;
6171 }
6172
6173 intel_encoder = &intel_dig_port->base;
6174 encoder = &intel_encoder->base;
6175
6176 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6177 DRM_MODE_ENCODER_TMDS);
6178
5bfe2ac0 6179 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6180 intel_encoder->disable = intel_disable_dp;
00c09d70 6181 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6182 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6183 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6184 if (IS_CHERRYVIEW(dev)) {
9197c88b 6185 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6186 intel_encoder->pre_enable = chv_pre_enable_dp;
6187 intel_encoder->enable = vlv_enable_dp;
580d3811 6188 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6189 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6190 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6191 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6192 intel_encoder->pre_enable = vlv_pre_enable_dp;
6193 intel_encoder->enable = vlv_enable_dp;
49277c31 6194 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6195 } else {
ecff4f3b
JN
6196 intel_encoder->pre_enable = g4x_pre_enable_dp;
6197 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6198 if (INTEL_INFO(dev)->gen >= 5)
6199 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6200 }
f0fec3f2 6201
174edf1f 6202 intel_dig_port->port = port;
f0fec3f2
PZ
6203 intel_dig_port->dp.output_reg = output_reg;
6204
00c09d70 6205 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6206 if (IS_CHERRYVIEW(dev)) {
6207 if (port == PORT_D)
6208 intel_encoder->crtc_mask = 1 << 2;
6209 else
6210 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6211 } else {
6212 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6213 }
bc079e8b 6214 intel_encoder->cloneable = 0;
f0fec3f2 6215
13cf5504 6216 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6217 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6218
15b1d171
PZ
6219 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6220 drm_encoder_cleanup(encoder);
6221 kfree(intel_dig_port);
b2f246a8 6222 kfree(intel_connector);
15b1d171 6223 }
f0fec3f2 6224}
0e32b39c
DA
6225
6226void intel_dp_mst_suspend(struct drm_device *dev)
6227{
6228 struct drm_i915_private *dev_priv = dev->dev_private;
6229 int i;
6230
6231 /* disable MST */
6232 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6233 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6234 if (!intel_dig_port)
6235 continue;
6236
6237 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6238 if (!intel_dig_port->dp.can_mst)
6239 continue;
6240 if (intel_dig_port->dp.is_mst)
6241 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6242 }
6243 }
6244}
6245
6246void intel_dp_mst_resume(struct drm_device *dev)
6247{
6248 struct drm_i915_private *dev_priv = dev->dev_private;
6249 int i;
6250
6251 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6252 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6253 if (!intel_dig_port)
6254 continue;
6255 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6256 int ret;
6257
6258 if (!intel_dig_port->dp.can_mst)
6259 continue;
6260
6261 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6262 if (ret != 0) {
6263 intel_dp_check_mst_status(&intel_dig_port->dp);
6264 }
6265 }
6266 }
6267}