drm/i915/bxt: Update the Broxton PCI ids
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf
CML
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5
CML
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
85 */
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63
SJ
93
94static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15 95 324000, 432000, 540000 };
fe51bfb9
VS
96static const int chv_rates[] = { 162000, 202500, 210000, 216000,
97 243000, 270000, 324000, 405000,
98 420000, 432000, 540000 };
f4896f15 99static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 100
cfcb0fc9
JB
101/**
102 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103 * @intel_dp: DP struct
104 *
105 * If a CPU or PCH DP output is attached to an eDP panel, this function
106 * will return true, and false otherwise.
107 */
108static bool is_edp(struct intel_dp *intel_dp)
109{
da63a9f2
PZ
110 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111
112 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
113}
114
68b4d824 115static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 116{
68b4d824
ID
117 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118
119 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
120}
121
df0e9248
CW
122static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123{
fa90ecef 124 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
125}
126
ea5b213a 127static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 128static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 129static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 130static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
131static void vlv_steal_power_sequencer(struct drm_device *dev,
132 enum pipe pipe);
a4fc5ed6 133
ed4e9c1d
VS
134static int
135intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 136{
7183dc29 137 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
138
139 switch (max_link_bw) {
140 case DP_LINK_BW_1_62:
141 case DP_LINK_BW_2_7:
1db10e28 142 case DP_LINK_BW_5_4:
d4eead50 143 break;
a4fc5ed6 144 default:
d4eead50
ID
145 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 max_link_bw);
a4fc5ed6
KP
147 max_link_bw = DP_LINK_BW_1_62;
148 break;
149 }
150 return max_link_bw;
151}
152
eeb6324d
PZ
153static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154{
155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 struct drm_device *dev = intel_dig_port->base.base.dev;
157 u8 source_max, sink_max;
158
159 source_max = 4;
160 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162 source_max = 2;
163
164 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165
166 return min(source_max, sink_max);
167}
168
cd9dde44
AJ
169/*
170 * The units on the numbers in the next two are... bizarre. Examples will
171 * make it clearer; this one parallels an example in the eDP spec.
172 *
173 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 *
175 * 270000 * 1 * 8 / 10 == 216000
176 *
177 * The actual data capacity of that configuration is 2.16Gbit/s, so the
178 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
179 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180 * 119000. At 18bpp that's 2142000 kilobits per second.
181 *
182 * Thus the strange-looking division by 10 in intel_dp_link_required, to
183 * get the result in decakilobits instead of kilobits.
184 */
185
a4fc5ed6 186static int
c898261c 187intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 188{
cd9dde44 189 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
190}
191
fe27d53e
DA
192static int
193intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194{
195 return (max_link_clock * max_lanes * 8) / 10;
196}
197
c19de8eb 198static enum drm_mode_status
a4fc5ed6
KP
199intel_dp_mode_valid(struct drm_connector *connector,
200 struct drm_display_mode *mode)
201{
df0e9248 202 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
203 struct intel_connector *intel_connector = to_intel_connector(connector);
204 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
205 int target_clock = mode->clock;
206 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 207
dd06f90e
JN
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
210 return MODE_PANEL;
211
dd06f90e 212 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 213 return MODE_PANEL;
03afc4a2
DV
214
215 target_clock = fixed_mode->clock;
7de56f43
ZY
216 }
217
50fec21a 218 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 219 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
224 if (mode_rate > max_rate)
c4867936 225 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
0af78a2b
DV
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
a4fc5ed6
KP
233 return MODE_OK;
234}
235
a4f1289e 236uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
237{
238 int i;
239 uint32_t v = 0;
240
241 if (src_bytes > 4)
242 src_bytes = 4;
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 return v;
246}
247
c2af70e2 248static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
249{
250 int i;
251 if (dst_bytes > 4)
252 dst_bytes = 4;
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
255}
256
fb0f8fbf
KP
257/* hrawclock is 1/4 the FSB frequency */
258static int
259intel_hrawclk(struct drm_device *dev)
260{
261 struct drm_i915_private *dev_priv = dev->dev_private;
262 uint32_t clkcfg;
263
9473c8f4
VP
264 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265 if (IS_VALLEYVIEW(dev))
266 return 200;
267
fb0f8fbf
KP
268 clkcfg = I915_READ(CLKCFG);
269 switch (clkcfg & CLKCFG_FSB_MASK) {
270 case CLKCFG_FSB_400:
271 return 100;
272 case CLKCFG_FSB_533:
273 return 133;
274 case CLKCFG_FSB_667:
275 return 166;
276 case CLKCFG_FSB_800:
277 return 200;
278 case CLKCFG_FSB_1067:
279 return 266;
280 case CLKCFG_FSB_1333:
281 return 333;
282 /* these two are just a guess; one of them might be right */
283 case CLKCFG_FSB_1600:
284 case CLKCFG_FSB_1600_ALT:
285 return 400;
286 default:
287 return 133;
288 }
289}
290
bf13e81b
JN
291static void
292intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 293 struct intel_dp *intel_dp);
bf13e81b
JN
294static void
295intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 296 struct intel_dp *intel_dp);
bf13e81b 297
773538e8
VS
298static void pps_lock(struct intel_dp *intel_dp)
299{
300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 struct intel_encoder *encoder = &intel_dig_port->base;
302 struct drm_device *dev = encoder->base.dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 enum intel_display_power_domain power_domain;
305
306 /*
307 * See vlv_power_sequencer_reset() why we need
308 * a power domain reference here.
309 */
310 power_domain = intel_display_port_power_domain(encoder);
311 intel_display_power_get(dev_priv, power_domain);
312
313 mutex_lock(&dev_priv->pps_mutex);
314}
315
316static void pps_unlock(struct intel_dp *intel_dp)
317{
318 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
319 struct intel_encoder *encoder = &intel_dig_port->base;
320 struct drm_device *dev = encoder->base.dev;
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 enum intel_display_power_domain power_domain;
323
324 mutex_unlock(&dev_priv->pps_mutex);
325
326 power_domain = intel_display_port_power_domain(encoder);
327 intel_display_power_put(dev_priv, power_domain);
328}
329
961a0db0
VS
330static void
331vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332{
333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 struct drm_device *dev = intel_dig_port->base.base.dev;
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 337 bool pll_enabled;
961a0db0
VS
338 uint32_t DP;
339
340 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
342 pipe_name(pipe), port_name(intel_dig_port->port)))
343 return;
344
345 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346 pipe_name(pipe), port_name(intel_dig_port->port));
347
348 /* Preserve the BIOS-computed detected bit. This is
349 * supposed to be read-only.
350 */
351 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353 DP |= DP_PORT_WIDTH(1);
354 DP |= DP_LINK_TRAIN_PAT_1;
355
356 if (IS_CHERRYVIEW(dev))
357 DP |= DP_PIPE_SELECT_CHV(pipe);
358 else if (pipe == PIPE_B)
359 DP |= DP_PIPEB_SELECT;
360
d288f65f
VS
361 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362
363 /*
364 * The DPLL for the pipe must be enabled for this to work.
365 * So enable temporarily it if it's not already enabled.
366 */
367 if (!pll_enabled)
368 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370
961a0db0
VS
371 /*
372 * Similar magic as in intel_dp_enable_port().
373 * We _must_ do this port enable + disable trick
374 * to make this power seqeuencer lock onto the port.
375 * Otherwise even VDD force bit won't work.
376 */
377 I915_WRITE(intel_dp->output_reg, DP);
378 POSTING_READ(intel_dp->output_reg);
379
380 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
382
383 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
385
386 if (!pll_enabled)
387 vlv_force_pll_off(dev, pipe);
961a0db0
VS
388}
389
bf13e81b
JN
390static enum pipe
391vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
392{
393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
394 struct drm_device *dev = intel_dig_port->base.base.dev;
395 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
396 struct intel_encoder *encoder;
397 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 398 enum pipe pipe;
bf13e81b 399
e39b999a 400 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 401
a8c3344e
VS
402 /* We should never land here with regular DP ports */
403 WARN_ON(!is_edp(intel_dp));
404
a4a5d2f8
VS
405 if (intel_dp->pps_pipe != INVALID_PIPE)
406 return intel_dp->pps_pipe;
407
408 /*
409 * We don't have power sequencer currently.
410 * Pick one that's not used by other ports.
411 */
412 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
413 base.head) {
414 struct intel_dp *tmp;
415
416 if (encoder->type != INTEL_OUTPUT_EDP)
417 continue;
418
419 tmp = enc_to_intel_dp(&encoder->base);
420
421 if (tmp->pps_pipe != INVALID_PIPE)
422 pipes &= ~(1 << tmp->pps_pipe);
423 }
424
425 /*
426 * Didn't find one. This should not happen since there
427 * are two power sequencers and up to two eDP ports.
428 */
429 if (WARN_ON(pipes == 0))
a8c3344e
VS
430 pipe = PIPE_A;
431 else
432 pipe = ffs(pipes) - 1;
a4a5d2f8 433
a8c3344e
VS
434 vlv_steal_power_sequencer(dev, pipe);
435 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
436
437 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
438 pipe_name(intel_dp->pps_pipe),
439 port_name(intel_dig_port->port));
440
441 /* init power sequencer on this pipe and port */
36b5f425
VS
442 intel_dp_init_panel_power_sequencer(dev, intel_dp);
443 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 444
961a0db0
VS
445 /*
446 * Even vdd force doesn't work until we've made
447 * the power sequencer lock in on the port.
448 */
449 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
450
451 return intel_dp->pps_pipe;
452}
453
6491ab27
VS
454typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
455 enum pipe pipe);
456
457static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461}
462
463static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467}
468
469static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 enum pipe pipe)
471{
472 return true;
473}
bf13e81b 474
a4a5d2f8 475static enum pipe
6491ab27
VS
476vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477 enum port port,
478 vlv_pipe_check pipe_check)
a4a5d2f8
VS
479{
480 enum pipe pipe;
bf13e81b 481
bf13e81b
JN
482 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
485
486 if (port_sel != PANEL_PORT_SELECT_VLV(port))
487 continue;
488
6491ab27
VS
489 if (!pipe_check(dev_priv, pipe))
490 continue;
491
a4a5d2f8 492 return pipe;
bf13e81b
JN
493 }
494
a4a5d2f8
VS
495 return INVALID_PIPE;
496}
497
498static void
499vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
500{
501 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
502 struct drm_device *dev = intel_dig_port->base.base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
504 enum port port = intel_dig_port->port;
505
506 lockdep_assert_held(&dev_priv->pps_mutex);
507
508 /* try to find a pipe with this port selected */
6491ab27
VS
509 /* first pick one where the panel is on */
510 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
511 vlv_pipe_has_pp_on);
512 /* didn't find one? pick one where vdd is on */
513 if (intel_dp->pps_pipe == INVALID_PIPE)
514 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 vlv_pipe_has_vdd_on);
516 /* didn't find one? pick one with just the correct port */
517 if (intel_dp->pps_pipe == INVALID_PIPE)
518 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
519 vlv_pipe_any);
a4a5d2f8
VS
520
521 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
522 if (intel_dp->pps_pipe == INVALID_PIPE) {
523 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
524 port_name(port));
525 return;
bf13e81b
JN
526 }
527
a4a5d2f8
VS
528 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
529 port_name(port), pipe_name(intel_dp->pps_pipe));
530
36b5f425
VS
531 intel_dp_init_panel_power_sequencer(dev, intel_dp);
532 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
533}
534
773538e8
VS
535void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
536{
537 struct drm_device *dev = dev_priv->dev;
538 struct intel_encoder *encoder;
539
540 if (WARN_ON(!IS_VALLEYVIEW(dev)))
541 return;
542
543 /*
544 * We can't grab pps_mutex here due to deadlock with power_domain
545 * mutex when power_domain functions are called while holding pps_mutex.
546 * That also means that in order to use pps_pipe the code needs to
547 * hold both a power domain reference and pps_mutex, and the power domain
548 * reference get/put must be done while _not_ holding pps_mutex.
549 * pps_{lock,unlock}() do these steps in the correct order, so one
550 * should use them always.
551 */
552
553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
554 struct intel_dp *intel_dp;
555
556 if (encoder->type != INTEL_OUTPUT_EDP)
557 continue;
558
559 intel_dp = enc_to_intel_dp(&encoder->base);
560 intel_dp->pps_pipe = INVALID_PIPE;
561 }
bf13e81b
JN
562}
563
564static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565{
566 struct drm_device *dev = intel_dp_to_dev(intel_dp);
567
568 if (HAS_PCH_SPLIT(dev))
569 return PCH_PP_CONTROL;
570 else
571 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
572}
573
574static u32 _pp_stat_reg(struct intel_dp *intel_dp)
575{
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
578 if (HAS_PCH_SPLIT(dev))
579 return PCH_PP_STATUS;
580 else
581 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
582}
583
01527b31
CT
584/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
585 This function only applicable when panel PM state is not to be tracked */
586static int edp_notify_handler(struct notifier_block *this, unsigned long code,
587 void *unused)
588{
589 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
590 edp_notifier);
591 struct drm_device *dev = intel_dp_to_dev(intel_dp);
592 struct drm_i915_private *dev_priv = dev->dev_private;
593 u32 pp_div;
594 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
595
596 if (!is_edp(intel_dp) || code != SYS_RESTART)
597 return 0;
598
773538e8 599 pps_lock(intel_dp);
e39b999a 600
01527b31 601 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
602 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
603
01527b31
CT
604 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
605 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
606 pp_div = I915_READ(pp_div_reg);
607 pp_div &= PP_REFERENCE_DIVIDER_MASK;
608
609 /* 0x1F write to PP_DIV_REG sets max cycle delay */
610 I915_WRITE(pp_div_reg, pp_div | 0x1F);
611 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
612 msleep(intel_dp->panel_power_cycle_delay);
613 }
614
773538e8 615 pps_unlock(intel_dp);
e39b999a 616
01527b31
CT
617 return 0;
618}
619
4be73780 620static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 621{
30add22d 622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
623 struct drm_i915_private *dev_priv = dev->dev_private;
624
e39b999a
VS
625 lockdep_assert_held(&dev_priv->pps_mutex);
626
9a42356b
VS
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
629 return false;
630
bf13e81b 631 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
632}
633
4be73780 634static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
637 struct drm_i915_private *dev_priv = dev->dev_private;
638
e39b999a
VS
639 lockdep_assert_held(&dev_priv->pps_mutex);
640
9a42356b
VS
641 if (IS_VALLEYVIEW(dev) &&
642 intel_dp->pps_pipe == INVALID_PIPE)
643 return false;
644
773538e8 645 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
646}
647
9b984dae
KP
648static void
649intel_dp_check_edp(struct intel_dp *intel_dp)
650{
30add22d 651 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 652 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 653
9b984dae
KP
654 if (!is_edp(intel_dp))
655 return;
453c5420 656
4be73780 657 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
658 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
660 I915_READ(_pp_stat_reg(intel_dp)),
661 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
662 }
663}
664
9ee32fea
DV
665static uint32_t
666intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
667{
668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
669 struct drm_device *dev = intel_dig_port->base.base.dev;
670 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 671 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
672 uint32_t status;
673 bool done;
674
ef04f00d 675#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 676 if (has_aux_irq)
b18ac466 677 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 678 msecs_to_jiffies_timeout(10));
9ee32fea
DV
679 else
680 done = wait_for_atomic(C, 10) == 0;
681 if (!done)
682 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 has_aux_irq);
684#undef C
685
686 return status;
687}
688
ec5b01dd 689static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 690{
174edf1f
PZ
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 693
ec5b01dd
DL
694 /*
695 * The clock divider is based off the hrawclk, and would like to run at
696 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 697 */
ec5b01dd
DL
698 return index ? 0 : intel_hrawclk(dev) / 2;
699}
700
701static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702{
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 705 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
706
707 if (index)
708 return 0;
709
710 if (intel_dig_port->port == PORT_A) {
469d4b2a 711 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
712 } else {
713 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714 }
715}
716
717static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718{
719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720 struct drm_device *dev = intel_dig_port->base.base.dev;
721 struct drm_i915_private *dev_priv = dev->dev_private;
722
723 if (intel_dig_port->port == PORT_A) {
724 if (index)
725 return 0;
1652d19e 726 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
727 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728 /* Workaround for non-ULT HSW */
bc86625a
CW
729 switch (index) {
730 case 0: return 63;
731 case 1: return 72;
732 default: return 0;
733 }
ec5b01dd 734 } else {
bc86625a 735 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 736 }
b84a1cf8
RV
737}
738
ec5b01dd
DL
739static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
740{
741 return index ? 0 : 100;
742}
743
b6b5e383
DL
744static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
745{
746 /*
747 * SKL doesn't need us to program the AUX clock divider (Hardware will
748 * derive the clock from CDCLK automatically). We still implement the
749 * get_aux_clock_divider vfunc to plug-in into the existing code.
750 */
751 return index ? 0 : 1;
752}
753
5ed12a19
DL
754static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
755 bool has_aux_irq,
756 int send_bytes,
757 uint32_t aux_clock_divider)
758{
759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760 struct drm_device *dev = intel_dig_port->base.base.dev;
761 uint32_t precharge, timeout;
762
763 if (IS_GEN6(dev))
764 precharge = 3;
765 else
766 precharge = 5;
767
768 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770 else
771 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772
773 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 774 DP_AUX_CH_CTL_DONE |
5ed12a19 775 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 776 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 777 timeout |
788d4433 778 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
779 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 781 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
782}
783
b9ca5fad
DL
784static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 bool has_aux_irq,
786 int send_bytes,
787 uint32_t unused)
788{
789 return DP_AUX_CH_CTL_SEND_BUSY |
790 DP_AUX_CH_CTL_DONE |
791 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792 DP_AUX_CH_CTL_TIME_OUT_ERROR |
793 DP_AUX_CH_CTL_TIME_OUT_1600us |
794 DP_AUX_CH_CTL_RECEIVE_ERROR |
795 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797}
798
b84a1cf8
RV
799static int
800intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 801 const uint8_t *send, int send_bytes,
b84a1cf8
RV
802 uint8_t *recv, int recv_size)
803{
804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805 struct drm_device *dev = intel_dig_port->base.base.dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
807 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808 uint32_t ch_data = ch_ctl + 4;
bc86625a 809 uint32_t aux_clock_divider;
b84a1cf8
RV
810 int i, ret, recv_bytes;
811 uint32_t status;
5ed12a19 812 int try, clock = 0;
4e6b788c 813 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
814 bool vdd;
815
773538e8 816 pps_lock(intel_dp);
e39b999a 817
72c3500a
VS
818 /*
819 * We will be called with VDD already enabled for dpcd/edid/oui reads.
820 * In such cases we want to leave VDD enabled and it's up to upper layers
821 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
822 * ourselves.
823 */
1e0560e0 824 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
825
826 /* dp aux is extremely sensitive to irq latency, hence request the
827 * lowest possible wakeup latency and so prevent the cpu from going into
828 * deep sleep states.
829 */
830 pm_qos_update_request(&dev_priv->pm_qos, 0);
831
832 intel_dp_check_edp(intel_dp);
5eb08b69 833
c67a470b
PZ
834 intel_aux_display_runtime_get(dev_priv);
835
11bee43e
JB
836 /* Try to wait for any previous AUX channel activity */
837 for (try = 0; try < 3; try++) {
ef04f00d 838 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
839 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
840 break;
841 msleep(1);
842 }
843
844 if (try == 3) {
845 WARN(1, "dp_aux_ch not started status 0x%08x\n",
846 I915_READ(ch_ctl));
9ee32fea
DV
847 ret = -EBUSY;
848 goto out;
4f7f7b7e
CW
849 }
850
46a5ae9f
PZ
851 /* Only 5 data registers! */
852 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 ret = -E2BIG;
854 goto out;
855 }
856
ec5b01dd 857 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
858 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 has_aux_irq,
860 send_bytes,
861 aux_clock_divider);
5ed12a19 862
bc86625a
CW
863 /* Must try at least 3 times according to DP spec */
864 for (try = 0; try < 5; try++) {
865 /* Load the send data into the aux channel data registers */
866 for (i = 0; i < send_bytes; i += 4)
867 I915_WRITE(ch_data + i,
a4f1289e
RV
868 intel_dp_pack_aux(send + i,
869 send_bytes - i));
bc86625a
CW
870
871 /* Send the command and wait for it to complete */
5ed12a19 872 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
873
874 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
875
876 /* Clear done status and any errors */
877 I915_WRITE(ch_ctl,
878 status |
879 DP_AUX_CH_CTL_DONE |
880 DP_AUX_CH_CTL_TIME_OUT_ERROR |
881 DP_AUX_CH_CTL_RECEIVE_ERROR);
882
74ebf294 883 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 884 continue;
74ebf294
TP
885
886 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887 * 400us delay required for errors and timeouts
888 * Timeout errors from the HW already meet this
889 * requirement so skip to next iteration
890 */
891 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892 usleep_range(400, 500);
bc86625a 893 continue;
74ebf294 894 }
bc86625a
CW
895 if (status & DP_AUX_CH_CTL_DONE)
896 break;
897 }
4f7f7b7e 898 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
899 break;
900 }
901
a4fc5ed6 902 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 903 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
904 ret = -EBUSY;
905 goto out;
a4fc5ed6
KP
906 }
907
908 /* Check for timeout or receive error.
909 * Timeouts occur when the sink is not connected
910 */
a5b3da54 911 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 912 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
913 ret = -EIO;
914 goto out;
a5b3da54 915 }
1ae8c0a5
KP
916
917 /* Timeouts occur when the device isn't connected, so they're
918 * "normal" -- don't fill the kernel log with these */
a5b3da54 919 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 920 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
921 ret = -ETIMEDOUT;
922 goto out;
a4fc5ed6
KP
923 }
924
925 /* Unload any bytes sent back from the other side */
926 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
927 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
928 if (recv_bytes > recv_size)
929 recv_bytes = recv_size;
0206e353 930
4f7f7b7e 931 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
932 intel_dp_unpack_aux(I915_READ(ch_data + i),
933 recv + i, recv_bytes - i);
a4fc5ed6 934
9ee32fea
DV
935 ret = recv_bytes;
936out:
937 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 938 intel_aux_display_runtime_put(dev_priv);
9ee32fea 939
884f19e9
JN
940 if (vdd)
941 edp_panel_vdd_off(intel_dp, false);
942
773538e8 943 pps_unlock(intel_dp);
e39b999a 944
9ee32fea 945 return ret;
a4fc5ed6
KP
946}
947
a6c8aff0
JN
948#define BARE_ADDRESS_SIZE 3
949#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
950static ssize_t
951intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 952{
9d1a1031
JN
953 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
954 uint8_t txbuf[20], rxbuf[20];
955 size_t txsize, rxsize;
a4fc5ed6 956 int ret;
a4fc5ed6 957
d2d9cbbd
VS
958 txbuf[0] = (msg->request << 4) |
959 ((msg->address >> 16) & 0xf);
960 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
961 txbuf[2] = msg->address & 0xff;
962 txbuf[3] = msg->size - 1;
46a5ae9f 963
9d1a1031
JN
964 switch (msg->request & ~DP_AUX_I2C_MOT) {
965 case DP_AUX_NATIVE_WRITE:
966 case DP_AUX_I2C_WRITE:
a6c8aff0 967 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 968 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 969
9d1a1031
JN
970 if (WARN_ON(txsize > 20))
971 return -E2BIG;
a4fc5ed6 972
9d1a1031 973 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 978
a1ddefd8
JN
979 if (ret > 1) {
980 /* Number of bytes written in a short write. */
981 ret = clamp_t(int, rxbuf[1], 0, msg->size);
982 } else {
983 /* Return payload size. */
984 ret = msg->size;
985 }
9d1a1031
JN
986 }
987 break;
46a5ae9f 988
9d1a1031
JN
989 case DP_AUX_NATIVE_READ:
990 case DP_AUX_I2C_READ:
a6c8aff0 991 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 992 rxsize = msg->size + 1;
a4fc5ed6 993
9d1a1031
JN
994 if (WARN_ON(rxsize > 20))
995 return -E2BIG;
a4fc5ed6 996
9d1a1031
JN
997 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
998 if (ret > 0) {
999 msg->reply = rxbuf[0] >> 4;
1000 /*
1001 * Assume happy day, and copy the data. The caller is
1002 * expected to check msg->reply before touching it.
1003 *
1004 * Return payload size.
1005 */
1006 ret--;
1007 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1008 }
9d1a1031
JN
1009 break;
1010
1011 default:
1012 ret = -EINVAL;
1013 break;
a4fc5ed6 1014 }
f51a44b9 1015
9d1a1031 1016 return ret;
a4fc5ed6
KP
1017}
1018
9d1a1031
JN
1019static void
1020intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1021{
1022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1023 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1024 enum port port = intel_dig_port->port;
0b99836f 1025 const char *name = NULL;
ab2c0672
DA
1026 int ret;
1027
33ad6626
JN
1028 switch (port) {
1029 case PORT_A:
1030 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1031 name = "DPDDC-A";
ab2c0672 1032 break;
33ad6626
JN
1033 case PORT_B:
1034 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1035 name = "DPDDC-B";
ab2c0672 1036 break;
33ad6626
JN
1037 case PORT_C:
1038 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1039 name = "DPDDC-C";
ab2c0672 1040 break;
33ad6626
JN
1041 case PORT_D:
1042 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1043 name = "DPDDC-D";
33ad6626
JN
1044 break;
1045 default:
1046 BUG();
ab2c0672
DA
1047 }
1048
1b1aad75
DL
1049 /*
1050 * The AUX_CTL register is usually DP_CTL + 0x10.
1051 *
1052 * On Haswell and Broadwell though:
1053 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1054 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1055 *
1056 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1057 */
1058 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1059 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1060
0b99836f 1061 intel_dp->aux.name = name;
9d1a1031
JN
1062 intel_dp->aux.dev = dev->dev;
1063 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1064
0b99836f
JN
1065 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1066 connector->base.kdev->kobj.name);
8316f337 1067
4f71d0cb 1068 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1069 if (ret < 0) {
4f71d0cb 1070 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1071 name, ret);
1072 return;
ab2c0672 1073 }
8a5e6aeb 1074
0b99836f
JN
1075 ret = sysfs_create_link(&connector->base.kdev->kobj,
1076 &intel_dp->aux.ddc.dev.kobj,
1077 intel_dp->aux.ddc.dev.kobj.name);
1078 if (ret < 0) {
1079 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1080 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1081 }
a4fc5ed6
KP
1082}
1083
80f65de3
ID
1084static void
1085intel_dp_connector_unregister(struct intel_connector *intel_connector)
1086{
1087 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1088
0e32b39c
DA
1089 if (!intel_connector->mst_port)
1090 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1091 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1092 intel_connector_unregister(intel_connector);
1093}
1094
5416d871 1095static void
c3346ef6 1096skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1097{
1098 u32 ctrl1;
1099
dd3cd74a
ACO
1100 memset(&pipe_config->dpll_hw_state, 0,
1101 sizeof(pipe_config->dpll_hw_state));
1102
5416d871
DL
1103 pipe_config->ddi_pll_sel = SKL_DPLL0;
1104 pipe_config->dpll_hw_state.cfgcr1 = 0;
1105 pipe_config->dpll_hw_state.cfgcr2 = 0;
1106
1107 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1108 switch (link_clock / 2) {
1109 case 81000:
71cd8423 1110 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1111 SKL_DPLL0);
1112 break;
c3346ef6 1113 case 135000:
71cd8423 1114 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1115 SKL_DPLL0);
1116 break;
c3346ef6 1117 case 270000:
71cd8423 1118 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1119 SKL_DPLL0);
1120 break;
c3346ef6 1121 case 162000:
71cd8423 1122 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1123 SKL_DPLL0);
1124 break;
1125 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1126 results in CDCLK change. Need to handle the change of CDCLK by
1127 disabling pipes and re-enabling them */
1128 case 108000:
71cd8423 1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1130 SKL_DPLL0);
1131 break;
1132 case 216000:
71cd8423 1133 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1134 SKL_DPLL0);
1135 break;
1136
5416d871
DL
1137 }
1138 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1139}
1140
0e50338c 1141static void
5cec258b 1142hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1143{
1144 switch (link_bw) {
1145 case DP_LINK_BW_1_62:
1146 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1147 break;
1148 case DP_LINK_BW_2_7:
1149 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1150 break;
1151 case DP_LINK_BW_5_4:
1152 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1153 break;
1154 }
1155}
1156
fc0f8e25 1157static int
12f6a2e2 1158intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1159{
94ca719e
VS
1160 if (intel_dp->num_sink_rates) {
1161 *sink_rates = intel_dp->sink_rates;
1162 return intel_dp->num_sink_rates;
fc0f8e25 1163 }
12f6a2e2
VS
1164
1165 *sink_rates = default_rates;
1166
1167 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1168}
1169
a8f3ef61 1170static int
1db10e28 1171intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1172{
637a9c63
SJ
1173 if (IS_SKYLAKE(dev)) {
1174 *source_rates = skl_rates;
1175 return ARRAY_SIZE(skl_rates);
fe51bfb9
VS
1176 } else if (IS_CHERRYVIEW(dev)) {
1177 *source_rates = chv_rates;
1178 return ARRAY_SIZE(chv_rates);
a8f3ef61 1179 }
636280ba
VS
1180
1181 *source_rates = default_rates;
1182
1db10e28
VS
1183 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1184 /* WaDisableHBR2:skl */
1185 return (DP_LINK_BW_2_7 >> 3) + 1;
1186 else if (INTEL_INFO(dev)->gen >= 8 ||
1187 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1188 return (DP_LINK_BW_5_4 >> 3) + 1;
1189 else
1190 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1191}
1192
c6bb3538
DV
1193static void
1194intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1195 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1196{
1197 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1198 const struct dp_link_dpll *divisor = NULL;
1199 int i, count = 0;
c6bb3538
DV
1200
1201 if (IS_G4X(dev)) {
9dd4ffdf
CML
1202 divisor = gen4_dpll;
1203 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1204 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1205 divisor = pch_dpll;
1206 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1207 } else if (IS_CHERRYVIEW(dev)) {
1208 divisor = chv_dpll;
1209 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1210 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1211 divisor = vlv_dpll;
1212 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1213 }
9dd4ffdf
CML
1214
1215 if (divisor && count) {
1216 for (i = 0; i < count; i++) {
1217 if (link_bw == divisor[i].link_bw) {
1218 pipe_config->dpll = divisor[i].dpll;
1219 pipe_config->clock_set = true;
1220 break;
1221 }
1222 }
c6bb3538
DV
1223 }
1224}
1225
2ecae76a
VS
1226static int intersect_rates(const int *source_rates, int source_len,
1227 const int *sink_rates, int sink_len,
94ca719e 1228 int *common_rates)
a8f3ef61
SJ
1229{
1230 int i = 0, j = 0, k = 0;
1231
a8f3ef61
SJ
1232 while (i < source_len && j < sink_len) {
1233 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1234 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1235 return k;
94ca719e 1236 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1237 ++k;
1238 ++i;
1239 ++j;
1240 } else if (source_rates[i] < sink_rates[j]) {
1241 ++i;
1242 } else {
1243 ++j;
1244 }
1245 }
1246 return k;
1247}
1248
94ca719e
VS
1249static int intel_dp_common_rates(struct intel_dp *intel_dp,
1250 int *common_rates)
2ecae76a
VS
1251{
1252 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1253 const int *source_rates, *sink_rates;
1254 int source_len, sink_len;
1255
1256 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1257 source_len = intel_dp_source_rates(dev, &source_rates);
1258
1259 return intersect_rates(source_rates, source_len,
1260 sink_rates, sink_len,
94ca719e 1261 common_rates);
2ecae76a
VS
1262}
1263
0336400e
VS
1264static void snprintf_int_array(char *str, size_t len,
1265 const int *array, int nelem)
1266{
1267 int i;
1268
1269 str[0] = '\0';
1270
1271 for (i = 0; i < nelem; i++) {
1272 int r = snprintf(str, len, "%d,", array[i]);
1273 if (r >= len)
1274 return;
1275 str += r;
1276 len -= r;
1277 }
1278}
1279
1280static void intel_dp_print_rates(struct intel_dp *intel_dp)
1281{
1282 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1283 const int *source_rates, *sink_rates;
94ca719e
VS
1284 int source_len, sink_len, common_len;
1285 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1286 char str[128]; /* FIXME: too big for stack? */
1287
1288 if ((drm_debug & DRM_UT_KMS) == 0)
1289 return;
1290
1291 source_len = intel_dp_source_rates(dev, &source_rates);
1292 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1293 DRM_DEBUG_KMS("source rates: %s\n", str);
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1296 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1297 DRM_DEBUG_KMS("sink rates: %s\n", str);
1298
94ca719e
VS
1299 common_len = intel_dp_common_rates(intel_dp, common_rates);
1300 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1301 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1302}
1303
f4896f15 1304static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1305{
1306 int i = 0;
1307
1308 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1309 if (find == rates[i])
1310 break;
1311
1312 return i;
1313}
1314
50fec21a
VS
1315int
1316intel_dp_max_link_rate(struct intel_dp *intel_dp)
1317{
1318 int rates[DP_MAX_SUPPORTED_RATES] = {};
1319 int len;
1320
94ca719e 1321 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1322 if (WARN_ON(len <= 0))
1323 return 162000;
1324
1325 return rates[rate_to_index(0, rates) - 1];
1326}
1327
ed4e9c1d
VS
1328int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1329{
94ca719e 1330 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1331}
1332
00c09d70 1333bool
5bfe2ac0 1334intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1335 struct intel_crtc_state *pipe_config)
a4fc5ed6 1336{
5bfe2ac0 1337 struct drm_device *dev = encoder->base.dev;
36008365 1338 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1339 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1341 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1342 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1343 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1344 int lane_count, clock;
56071a20 1345 int min_lane_count = 1;
eeb6324d 1346 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1347 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1348 int min_clock = 0;
a8f3ef61 1349 int max_clock;
083f9560 1350 int bpp, mode_rate;
ff9a6750 1351 int link_avail, link_clock;
94ca719e
VS
1352 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1353 int common_len;
a8f3ef61 1354
94ca719e 1355 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1356
1357 /* No common link rates between source and sink */
94ca719e 1358 WARN_ON(common_len <= 0);
a8f3ef61 1359
94ca719e 1360 max_clock = common_len - 1;
a4fc5ed6 1361
bc7d38a4 1362 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1363 pipe_config->has_pch_encoder = true;
1364
03afc4a2 1365 pipe_config->has_dp_encoder = true;
f769cd24 1366 pipe_config->has_drrs = false;
9fcb1704 1367 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1368
dd06f90e
JN
1369 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1370 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1371 adjusted_mode);
a1b2278e
CK
1372
1373 if (INTEL_INFO(dev)->gen >= 9) {
1374 int ret;
1375 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1376 if (ret)
1377 return ret;
1378 }
1379
2dd24552
JB
1380 if (!HAS_PCH_SPLIT(dev))
1381 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1382 intel_connector->panel.fitting_mode);
1383 else
b074cec8
JB
1384 intel_pch_panel_fitting(intel_crtc, pipe_config,
1385 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1386 }
1387
cb1793ce 1388 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1389 return false;
1390
083f9560 1391 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1392 "max bw %d pixel clock %iKHz\n",
94ca719e 1393 max_lane_count, common_rates[max_clock],
241bfc38 1394 adjusted_mode->crtc_clock);
083f9560 1395
36008365
DV
1396 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1397 * bpc in between. */
3e7ca985 1398 bpp = pipe_config->pipe_bpp;
56071a20
JN
1399 if (is_edp(intel_dp)) {
1400 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1401 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1402 dev_priv->vbt.edp_bpp);
1403 bpp = dev_priv->vbt.edp_bpp;
1404 }
1405
344c5bbc
JN
1406 /*
1407 * Use the maximum clock and number of lanes the eDP panel
1408 * advertizes being capable of. The panels are generally
1409 * designed to support only a single clock and lane
1410 * configuration, and typically these values correspond to the
1411 * native resolution of the panel.
1412 */
1413 min_lane_count = max_lane_count;
1414 min_clock = max_clock;
7984211e 1415 }
657445fe 1416
36008365 1417 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1418 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1419 bpp);
36008365 1420
c6930992 1421 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1422 for (lane_count = min_lane_count;
1423 lane_count <= max_lane_count;
1424 lane_count <<= 1) {
1425
94ca719e 1426 link_clock = common_rates[clock];
36008365
DV
1427 link_avail = intel_dp_max_data_rate(link_clock,
1428 lane_count);
1429
1430 if (mode_rate <= link_avail) {
1431 goto found;
1432 }
1433 }
1434 }
1435 }
c4867936 1436
36008365 1437 return false;
3685a8f3 1438
36008365 1439found:
55bc60db
VS
1440 if (intel_dp->color_range_auto) {
1441 /*
1442 * See:
1443 * CEA-861-E - 5.1 Default Encoding Parameters
1444 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1445 */
18316c8c 1446 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1447 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1448 else
1449 intel_dp->color_range = 0;
1450 }
1451
3685a8f3 1452 if (intel_dp->color_range)
50f3b016 1453 pipe_config->limited_color_range = true;
a4fc5ed6 1454
36008365 1455 intel_dp->lane_count = lane_count;
a8f3ef61 1456
94ca719e 1457 if (intel_dp->num_sink_rates) {
bc27b7d3 1458 intel_dp->link_bw = 0;
a8f3ef61 1459 intel_dp->rate_select =
94ca719e 1460 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1461 } else {
1462 intel_dp->link_bw =
94ca719e 1463 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1464 intel_dp->rate_select = 0;
a8f3ef61
SJ
1465 }
1466
657445fe 1467 pipe_config->pipe_bpp = bpp;
94ca719e 1468 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1469
36008365
DV
1470 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1471 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1472 pipe_config->port_clock, bpp);
36008365
DV
1473 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1474 mode_rate, link_avail);
a4fc5ed6 1475
03afc4a2 1476 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1477 adjusted_mode->crtc_clock,
1478 pipe_config->port_clock,
03afc4a2 1479 &pipe_config->dp_m_n);
9d1a455b 1480
439d7ac0 1481 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1482 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1483 pipe_config->has_drrs = true;
439d7ac0
PB
1484 intel_link_compute_m_n(bpp, lane_count,
1485 intel_connector->panel.downclock_mode->clock,
1486 pipe_config->port_clock,
1487 &pipe_config->dp_m2_n2);
1488 }
1489
5416d871 1490 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1491 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1492 else if (IS_BROXTON(dev))
1493 /* handled in ddi */;
5416d871 1494 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1495 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1496 else
1497 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1498
03afc4a2 1499 return true;
a4fc5ed6
KP
1500}
1501
7c62a164 1502static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1503{
7c62a164
DV
1504 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1505 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1506 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1507 struct drm_i915_private *dev_priv = dev->dev_private;
1508 u32 dpa_ctl;
1509
6e3c9717
ACO
1510 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1511 crtc->config->port_clock);
ea9b6006
DV
1512 dpa_ctl = I915_READ(DP_A);
1513 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1514
6e3c9717 1515 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1516 /* For a long time we've carried around a ILK-DevA w/a for the
1517 * 160MHz clock. If we're really unlucky, it's still required.
1518 */
1519 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1520 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1521 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1522 } else {
1523 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1524 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1525 }
1ce17038 1526
ea9b6006
DV
1527 I915_WRITE(DP_A, dpa_ctl);
1528
1529 POSTING_READ(DP_A);
1530 udelay(500);
1531}
1532
8ac33ed3 1533static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1534{
b934223d 1535 struct drm_device *dev = encoder->base.dev;
417e822d 1536 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1537 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1538 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1540 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1541
417e822d 1542 /*
1a2eb460 1543 * There are four kinds of DP registers:
417e822d
KP
1544 *
1545 * IBX PCH
1a2eb460
KP
1546 * SNB CPU
1547 * IVB CPU
417e822d
KP
1548 * CPT PCH
1549 *
1550 * IBX PCH and CPU are the same for almost everything,
1551 * except that the CPU DP PLL is configured in this
1552 * register
1553 *
1554 * CPT PCH is quite different, having many bits moved
1555 * to the TRANS_DP_CTL register instead. That
1556 * configuration happens (oddly) in ironlake_pch_enable
1557 */
9c9e7927 1558
417e822d
KP
1559 /* Preserve the BIOS-computed detected bit. This is
1560 * supposed to be read-only.
1561 */
1562 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1563
417e822d 1564 /* Handle DP bits in common between all three register formats */
417e822d 1565 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1566 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1567
6e3c9717 1568 if (crtc->config->has_audio)
ea5b213a 1569 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1570
417e822d 1571 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1572
bc7d38a4 1573 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1574 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1575 intel_dp->DP |= DP_SYNC_HS_HIGH;
1576 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1577 intel_dp->DP |= DP_SYNC_VS_HIGH;
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1579
6aba5b6c 1580 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1581 intel_dp->DP |= DP_ENHANCED_FRAMING;
1582
7c62a164 1583 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1584 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1585 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1586 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1587
1588 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1589 intel_dp->DP |= DP_SYNC_HS_HIGH;
1590 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1591 intel_dp->DP |= DP_SYNC_VS_HIGH;
1592 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1593
6aba5b6c 1594 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1595 intel_dp->DP |= DP_ENHANCED_FRAMING;
1596
44f37d1f
CML
1597 if (!IS_CHERRYVIEW(dev)) {
1598 if (crtc->pipe == 1)
1599 intel_dp->DP |= DP_PIPEB_SELECT;
1600 } else {
1601 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1602 }
417e822d
KP
1603 } else {
1604 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1605 }
a4fc5ed6
KP
1606}
1607
ffd6749d
PZ
1608#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1609#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1610
1a5ef5b7
PZ
1611#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1612#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1613
ffd6749d
PZ
1614#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1615#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1616
4be73780 1617static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1618 u32 mask,
1619 u32 value)
bd943159 1620{
30add22d 1621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1622 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1623 u32 pp_stat_reg, pp_ctrl_reg;
1624
e39b999a
VS
1625 lockdep_assert_held(&dev_priv->pps_mutex);
1626
bf13e81b
JN
1627 pp_stat_reg = _pp_stat_reg(intel_dp);
1628 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1629
99ea7127 1630 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1631 mask, value,
1632 I915_READ(pp_stat_reg),
1633 I915_READ(pp_ctrl_reg));
32ce697c 1634
453c5420 1635 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1636 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1637 I915_READ(pp_stat_reg),
1638 I915_READ(pp_ctrl_reg));
32ce697c 1639 }
54c136d4
CW
1640
1641 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1642}
32ce697c 1643
4be73780 1644static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1645{
1646 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1647 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1648}
1649
4be73780 1650static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1651{
1652 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1653 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1654}
1655
4be73780 1656static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1657{
1658 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1659
1660 /* When we disable the VDD override bit last we have to do the manual
1661 * wait. */
1662 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1663 intel_dp->panel_power_cycle_delay);
1664
4be73780 1665 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1666}
1667
4be73780 1668static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1669{
1670 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1671 intel_dp->backlight_on_delay);
1672}
1673
4be73780 1674static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1675{
1676 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1677 intel_dp->backlight_off_delay);
1678}
99ea7127 1679
832dd3c1
KP
1680/* Read the current pp_control value, unlocking the register if it
1681 * is locked
1682 */
1683
453c5420 1684static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1685{
453c5420
JB
1686 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1687 struct drm_i915_private *dev_priv = dev->dev_private;
1688 u32 control;
832dd3c1 1689
e39b999a
VS
1690 lockdep_assert_held(&dev_priv->pps_mutex);
1691
bf13e81b 1692 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1693 control &= ~PANEL_UNLOCK_MASK;
1694 control |= PANEL_UNLOCK_REGS;
1695 return control;
bd943159
KP
1696}
1697
951468f3
VS
1698/*
1699 * Must be paired with edp_panel_vdd_off().
1700 * Must hold pps_mutex around the whole on/off sequence.
1701 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1702 */
1e0560e0 1703static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1704{
30add22d 1705 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1706 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1707 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1708 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1709 enum intel_display_power_domain power_domain;
5d613501 1710 u32 pp;
453c5420 1711 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1712 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1713
e39b999a
VS
1714 lockdep_assert_held(&dev_priv->pps_mutex);
1715
97af61f5 1716 if (!is_edp(intel_dp))
adddaaf4 1717 return false;
bd943159 1718
2c623c11 1719 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1720 intel_dp->want_panel_vdd = true;
99ea7127 1721
4be73780 1722 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1723 return need_to_disable;
b0665d57 1724
4e6e1a54
ID
1725 power_domain = intel_display_port_power_domain(intel_encoder);
1726 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1727
3936fcf4
VS
1728 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1729 port_name(intel_dig_port->port));
bd943159 1730
4be73780
DV
1731 if (!edp_have_panel_power(intel_dp))
1732 wait_panel_power_cycle(intel_dp);
99ea7127 1733
453c5420 1734 pp = ironlake_get_pp_control(intel_dp);
5d613501 1735 pp |= EDP_FORCE_VDD;
ebf33b18 1736
bf13e81b
JN
1737 pp_stat_reg = _pp_stat_reg(intel_dp);
1738 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1739
1740 I915_WRITE(pp_ctrl_reg, pp);
1741 POSTING_READ(pp_ctrl_reg);
1742 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1743 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1744 /*
1745 * If the panel wasn't on, delay before accessing aux channel
1746 */
4be73780 1747 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1748 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1749 port_name(intel_dig_port->port));
f01eca2e 1750 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1751 }
adddaaf4
JN
1752
1753 return need_to_disable;
1754}
1755
951468f3
VS
1756/*
1757 * Must be paired with intel_edp_panel_vdd_off() or
1758 * intel_edp_panel_off().
1759 * Nested calls to these functions are not allowed since
1760 * we drop the lock. Caller must use some higher level
1761 * locking to prevent nested calls from other threads.
1762 */
b80d6c78 1763void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1764{
c695b6b6 1765 bool vdd;
adddaaf4 1766
c695b6b6
VS
1767 if (!is_edp(intel_dp))
1768 return;
1769
773538e8 1770 pps_lock(intel_dp);
c695b6b6 1771 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1772 pps_unlock(intel_dp);
c695b6b6 1773
e2c719b7 1774 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1775 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1776}
1777
4be73780 1778static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1779{
30add22d 1780 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1781 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1782 struct intel_digital_port *intel_dig_port =
1783 dp_to_dig_port(intel_dp);
1784 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1785 enum intel_display_power_domain power_domain;
5d613501 1786 u32 pp;
453c5420 1787 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1788
e39b999a 1789 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1790
15e899a0 1791 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1792
15e899a0 1793 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1794 return;
b0665d57 1795
3936fcf4
VS
1796 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1797 port_name(intel_dig_port->port));
bd943159 1798
be2c9196
VS
1799 pp = ironlake_get_pp_control(intel_dp);
1800 pp &= ~EDP_FORCE_VDD;
453c5420 1801
be2c9196
VS
1802 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1803 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1804
be2c9196
VS
1805 I915_WRITE(pp_ctrl_reg, pp);
1806 POSTING_READ(pp_ctrl_reg);
90791a5c 1807
be2c9196
VS
1808 /* Make sure sequencer is idle before allowing subsequent activity */
1809 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1810 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1811
be2c9196
VS
1812 if ((pp & POWER_TARGET_ON) == 0)
1813 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1814
be2c9196
VS
1815 power_domain = intel_display_port_power_domain(intel_encoder);
1816 intel_display_power_put(dev_priv, power_domain);
bd943159 1817}
5d613501 1818
4be73780 1819static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1820{
1821 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1822 struct intel_dp, panel_vdd_work);
bd943159 1823
773538e8 1824 pps_lock(intel_dp);
15e899a0
VS
1825 if (!intel_dp->want_panel_vdd)
1826 edp_panel_vdd_off_sync(intel_dp);
773538e8 1827 pps_unlock(intel_dp);
bd943159
KP
1828}
1829
aba86890
ID
1830static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1831{
1832 unsigned long delay;
1833
1834 /*
1835 * Queue the timer to fire a long time from now (relative to the power
1836 * down delay) to keep the panel power up across a sequence of
1837 * operations.
1838 */
1839 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1840 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1841}
1842
951468f3
VS
1843/*
1844 * Must be paired with edp_panel_vdd_on().
1845 * Must hold pps_mutex around the whole on/off sequence.
1846 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1847 */
4be73780 1848static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1849{
e39b999a
VS
1850 struct drm_i915_private *dev_priv =
1851 intel_dp_to_dev(intel_dp)->dev_private;
1852
1853 lockdep_assert_held(&dev_priv->pps_mutex);
1854
97af61f5
KP
1855 if (!is_edp(intel_dp))
1856 return;
5d613501 1857
e2c719b7 1858 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1859 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1860
bd943159
KP
1861 intel_dp->want_panel_vdd = false;
1862
aba86890 1863 if (sync)
4be73780 1864 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1865 else
1866 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1867}
1868
9f0fb5be 1869static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1870{
30add22d 1871 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1872 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1873 u32 pp;
453c5420 1874 u32 pp_ctrl_reg;
9934c132 1875
9f0fb5be
VS
1876 lockdep_assert_held(&dev_priv->pps_mutex);
1877
97af61f5 1878 if (!is_edp(intel_dp))
bd943159 1879 return;
99ea7127 1880
3936fcf4
VS
1881 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1882 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1883
e7a89ace
VS
1884 if (WARN(edp_have_panel_power(intel_dp),
1885 "eDP port %c panel power already on\n",
1886 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1887 return;
9934c132 1888
4be73780 1889 wait_panel_power_cycle(intel_dp);
37c6c9b0 1890
bf13e81b 1891 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1892 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1893 if (IS_GEN5(dev)) {
1894 /* ILK workaround: disable reset around power sequence */
1895 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1896 I915_WRITE(pp_ctrl_reg, pp);
1897 POSTING_READ(pp_ctrl_reg);
05ce1a49 1898 }
37c6c9b0 1899
1c0ae80a 1900 pp |= POWER_TARGET_ON;
99ea7127
KP
1901 if (!IS_GEN5(dev))
1902 pp |= PANEL_POWER_RESET;
1903
453c5420
JB
1904 I915_WRITE(pp_ctrl_reg, pp);
1905 POSTING_READ(pp_ctrl_reg);
9934c132 1906
4be73780 1907 wait_panel_on(intel_dp);
dce56b3c 1908 intel_dp->last_power_on = jiffies;
9934c132 1909
05ce1a49
KP
1910 if (IS_GEN5(dev)) {
1911 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1912 I915_WRITE(pp_ctrl_reg, pp);
1913 POSTING_READ(pp_ctrl_reg);
05ce1a49 1914 }
9f0fb5be 1915}
e39b999a 1916
9f0fb5be
VS
1917void intel_edp_panel_on(struct intel_dp *intel_dp)
1918{
1919 if (!is_edp(intel_dp))
1920 return;
1921
1922 pps_lock(intel_dp);
1923 edp_panel_on(intel_dp);
773538e8 1924 pps_unlock(intel_dp);
9934c132
JB
1925}
1926
9f0fb5be
VS
1927
1928static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1929{
4e6e1a54
ID
1930 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1931 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1933 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1934 enum intel_display_power_domain power_domain;
99ea7127 1935 u32 pp;
453c5420 1936 u32 pp_ctrl_reg;
9934c132 1937
9f0fb5be
VS
1938 lockdep_assert_held(&dev_priv->pps_mutex);
1939
97af61f5
KP
1940 if (!is_edp(intel_dp))
1941 return;
37c6c9b0 1942
3936fcf4
VS
1943 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1944 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1945
3936fcf4
VS
1946 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1947 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1948
453c5420 1949 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1950 /* We need to switch off panel power _and_ force vdd, for otherwise some
1951 * panels get very unhappy and cease to work. */
b3064154
PJ
1952 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1953 EDP_BLC_ENABLE);
453c5420 1954
bf13e81b 1955 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1956
849e39f5
PZ
1957 intel_dp->want_panel_vdd = false;
1958
453c5420
JB
1959 I915_WRITE(pp_ctrl_reg, pp);
1960 POSTING_READ(pp_ctrl_reg);
9934c132 1961
dce56b3c 1962 intel_dp->last_power_cycle = jiffies;
4be73780 1963 wait_panel_off(intel_dp);
849e39f5
PZ
1964
1965 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1966 power_domain = intel_display_port_power_domain(intel_encoder);
1967 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1968}
e39b999a 1969
9f0fb5be
VS
1970void intel_edp_panel_off(struct intel_dp *intel_dp)
1971{
1972 if (!is_edp(intel_dp))
1973 return;
e39b999a 1974
9f0fb5be
VS
1975 pps_lock(intel_dp);
1976 edp_panel_off(intel_dp);
773538e8 1977 pps_unlock(intel_dp);
9934c132
JB
1978}
1979
1250d107
JN
1980/* Enable backlight in the panel power control. */
1981static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1982{
da63a9f2
PZ
1983 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1984 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1985 struct drm_i915_private *dev_priv = dev->dev_private;
1986 u32 pp;
453c5420 1987 u32 pp_ctrl_reg;
32f9d658 1988
01cb9ea6
JB
1989 /*
1990 * If we enable the backlight right away following a panel power
1991 * on, we may see slight flicker as the panel syncs with the eDP
1992 * link. So delay a bit to make sure the image is solid before
1993 * allowing it to appear.
1994 */
4be73780 1995 wait_backlight_on(intel_dp);
e39b999a 1996
773538e8 1997 pps_lock(intel_dp);
e39b999a 1998
453c5420 1999 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2000 pp |= EDP_BLC_ENABLE;
453c5420 2001
bf13e81b 2002 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2003
2004 I915_WRITE(pp_ctrl_reg, pp);
2005 POSTING_READ(pp_ctrl_reg);
e39b999a 2006
773538e8 2007 pps_unlock(intel_dp);
32f9d658
ZW
2008}
2009
1250d107
JN
2010/* Enable backlight PWM and backlight PP control. */
2011void intel_edp_backlight_on(struct intel_dp *intel_dp)
2012{
2013 if (!is_edp(intel_dp))
2014 return;
2015
2016 DRM_DEBUG_KMS("\n");
2017
2018 intel_panel_enable_backlight(intel_dp->attached_connector);
2019 _intel_edp_backlight_on(intel_dp);
2020}
2021
2022/* Disable backlight in the panel power control. */
2023static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2024{
30add22d 2025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2026 struct drm_i915_private *dev_priv = dev->dev_private;
2027 u32 pp;
453c5420 2028 u32 pp_ctrl_reg;
32f9d658 2029
f01eca2e
KP
2030 if (!is_edp(intel_dp))
2031 return;
2032
773538e8 2033 pps_lock(intel_dp);
e39b999a 2034
453c5420 2035 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2036 pp &= ~EDP_BLC_ENABLE;
453c5420 2037
bf13e81b 2038 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2039
2040 I915_WRITE(pp_ctrl_reg, pp);
2041 POSTING_READ(pp_ctrl_reg);
f7d2323c 2042
773538e8 2043 pps_unlock(intel_dp);
e39b999a
VS
2044
2045 intel_dp->last_backlight_off = jiffies;
f7d2323c 2046 edp_wait_backlight_off(intel_dp);
1250d107 2047}
f7d2323c 2048
1250d107
JN
2049/* Disable backlight PP control and backlight PWM. */
2050void intel_edp_backlight_off(struct intel_dp *intel_dp)
2051{
2052 if (!is_edp(intel_dp))
2053 return;
2054
2055 DRM_DEBUG_KMS("\n");
f7d2323c 2056
1250d107 2057 _intel_edp_backlight_off(intel_dp);
f7d2323c 2058 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2059}
a4fc5ed6 2060
73580fb7
JN
2061/*
2062 * Hook for controlling the panel power control backlight through the bl_power
2063 * sysfs attribute. Take care to handle multiple calls.
2064 */
2065static void intel_edp_backlight_power(struct intel_connector *connector,
2066 bool enable)
2067{
2068 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2069 bool is_enabled;
2070
773538e8 2071 pps_lock(intel_dp);
e39b999a 2072 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2073 pps_unlock(intel_dp);
73580fb7
JN
2074
2075 if (is_enabled == enable)
2076 return;
2077
23ba9373
JN
2078 DRM_DEBUG_KMS("panel power control backlight %s\n",
2079 enable ? "enable" : "disable");
73580fb7
JN
2080
2081 if (enable)
2082 _intel_edp_backlight_on(intel_dp);
2083 else
2084 _intel_edp_backlight_off(intel_dp);
2085}
2086
2bd2ad64 2087static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2088{
da63a9f2
PZ
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
d240f20f
JB
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2bd2ad64
DV
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
d240f20f
JB
2098 DRM_DEBUG_KMS("\n");
2099 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2100 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We don't adjust intel_dp->DP while tearing down the link, to
2104 * facilitate link retraining (e.g. after hotplug). Hence clear all
2105 * enable bits here to ensure that we don't enable too much. */
2106 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2107 intel_dp->DP |= DP_PLL_ENABLE;
2108 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2109 POSTING_READ(DP_A);
2110 udelay(200);
d240f20f
JB
2111}
2112
2bd2ad64 2113static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2114{
da63a9f2
PZ
2115 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2116 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2117 struct drm_device *dev = crtc->dev;
d240f20f
JB
2118 struct drm_i915_private *dev_priv = dev->dev_private;
2119 u32 dpa_ctl;
2120
2bd2ad64
DV
2121 assert_pipe_disabled(dev_priv,
2122 to_intel_crtc(crtc)->pipe);
2123
d240f20f 2124 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2125 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2126 "dp pll off, should be on\n");
2127 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2128
2129 /* We can't rely on the value tracked for the DP register in
2130 * intel_dp->DP because link_down must not change that (otherwise link
2131 * re-training will fail. */
298b0b39 2132 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2133 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2134 POSTING_READ(DP_A);
d240f20f
JB
2135 udelay(200);
2136}
2137
c7ad3810 2138/* If the sink supports it, try to set the power state appropriately */
c19b0669 2139void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2140{
2141 int ret, i;
2142
2143 /* Should have a valid DPCD by this point */
2144 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2145 return;
2146
2147 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2148 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2149 DP_SET_POWER_D3);
c7ad3810
JB
2150 } else {
2151 /*
2152 * When turning on, we need to retry for 1ms to give the sink
2153 * time to wake up.
2154 */
2155 for (i = 0; i < 3; i++) {
9d1a1031
JN
2156 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2157 DP_SET_POWER_D0);
c7ad3810
JB
2158 if (ret == 1)
2159 break;
2160 msleep(1);
2161 }
2162 }
f9cac721
JN
2163
2164 if (ret != 1)
2165 DRM_DEBUG_KMS("failed to %s sink power state\n",
2166 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2167}
2168
19d8fe15
DV
2169static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2170 enum pipe *pipe)
d240f20f 2171{
19d8fe15 2172 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2173 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2174 struct drm_device *dev = encoder->base.dev;
2175 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2176 enum intel_display_power_domain power_domain;
2177 u32 tmp;
2178
2179 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2180 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2181 return false;
2182
2183 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2184
2185 if (!(tmp & DP_PORT_EN))
2186 return false;
2187
bc7d38a4 2188 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2189 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2190 } else if (IS_CHERRYVIEW(dev)) {
2191 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2192 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2193 *pipe = PORT_TO_PIPE(tmp);
2194 } else {
2195 u32 trans_sel;
2196 u32 trans_dp;
2197 int i;
2198
2199 switch (intel_dp->output_reg) {
2200 case PCH_DP_B:
2201 trans_sel = TRANS_DP_PORT_SEL_B;
2202 break;
2203 case PCH_DP_C:
2204 trans_sel = TRANS_DP_PORT_SEL_C;
2205 break;
2206 case PCH_DP_D:
2207 trans_sel = TRANS_DP_PORT_SEL_D;
2208 break;
2209 default:
2210 return true;
2211 }
2212
055e393f 2213 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2214 trans_dp = I915_READ(TRANS_DP_CTL(i));
2215 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2216 *pipe = i;
2217 return true;
2218 }
2219 }
19d8fe15 2220
4a0833ec
DV
2221 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2222 intel_dp->output_reg);
2223 }
d240f20f 2224
19d8fe15
DV
2225 return true;
2226}
d240f20f 2227
045ac3b5 2228static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2229 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2230{
2231 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2232 u32 tmp, flags = 0;
63000ef6
XZ
2233 struct drm_device *dev = encoder->base.dev;
2234 struct drm_i915_private *dev_priv = dev->dev_private;
2235 enum port port = dp_to_dig_port(intel_dp)->port;
2236 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2237 int dotclock;
045ac3b5 2238
9ed109a7 2239 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2240
2241 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2242
63000ef6 2243 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2244 if (tmp & DP_SYNC_HS_HIGH)
2245 flags |= DRM_MODE_FLAG_PHSYNC;
2246 else
2247 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2248
63000ef6
XZ
2249 if (tmp & DP_SYNC_VS_HIGH)
2250 flags |= DRM_MODE_FLAG_PVSYNC;
2251 else
2252 flags |= DRM_MODE_FLAG_NVSYNC;
2253 } else {
2254 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2255 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2256 flags |= DRM_MODE_FLAG_PHSYNC;
2257 else
2258 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2259
63000ef6
XZ
2260 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2261 flags |= DRM_MODE_FLAG_PVSYNC;
2262 else
2263 flags |= DRM_MODE_FLAG_NVSYNC;
2264 }
045ac3b5 2265
2d112de7 2266 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2267
8c875fca
VS
2268 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2269 tmp & DP_COLOR_RANGE_16_235)
2270 pipe_config->limited_color_range = true;
2271
eb14cb74
VS
2272 pipe_config->has_dp_encoder = true;
2273
2274 intel_dp_get_m_n(crtc, pipe_config);
2275
18442d08 2276 if (port == PORT_A) {
f1f644dc
JB
2277 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2278 pipe_config->port_clock = 162000;
2279 else
2280 pipe_config->port_clock = 270000;
2281 }
18442d08
VS
2282
2283 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2284 &pipe_config->dp_m_n);
2285
2286 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2287 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2288
2d112de7 2289 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2290
c6cd2ee2
JN
2291 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2292 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2293 /*
2294 * This is a big fat ugly hack.
2295 *
2296 * Some machines in UEFI boot mode provide us a VBT that has 18
2297 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2298 * unknown we fail to light up. Yet the same BIOS boots up with
2299 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2300 * max, not what it tells us to use.
2301 *
2302 * Note: This will still be broken if the eDP panel is not lit
2303 * up by the BIOS, and thus we can't get the mode at module
2304 * load.
2305 */
2306 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2307 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2308 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2309 }
045ac3b5
JB
2310}
2311
e8cb4558 2312static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2313{
e8cb4558 2314 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2315 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2316 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2317
6e3c9717 2318 if (crtc->config->has_audio)
495a5bb8 2319 intel_audio_codec_disable(encoder);
6cb49835 2320
b32c6f48
RV
2321 if (HAS_PSR(dev) && !HAS_DDI(dev))
2322 intel_psr_disable(intel_dp);
2323
6cb49835
DV
2324 /* Make sure the panel is off before trying to change the mode. But also
2325 * ensure that we have vdd while we switch off the panel. */
24f3e092 2326 intel_edp_panel_vdd_on(intel_dp);
4be73780 2327 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2328 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2329 intel_edp_panel_off(intel_dp);
3739850b 2330
08aff3fe
VS
2331 /* disable the port before the pipe on g4x */
2332 if (INTEL_INFO(dev)->gen < 5)
3739850b 2333 intel_dp_link_down(intel_dp);
d240f20f
JB
2334}
2335
08aff3fe 2336static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2337{
2bd2ad64 2338 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2339 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2340
49277c31 2341 intel_dp_link_down(intel_dp);
08aff3fe
VS
2342 if (port == PORT_A)
2343 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2344}
2345
2346static void vlv_post_disable_dp(struct intel_encoder *encoder)
2347{
2348 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2349
2350 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2351}
2352
580d3811
VS
2353static void chv_post_disable_dp(struct intel_encoder *encoder)
2354{
2355 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2356 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2357 struct drm_device *dev = encoder->base.dev;
2358 struct drm_i915_private *dev_priv = dev->dev_private;
2359 struct intel_crtc *intel_crtc =
2360 to_intel_crtc(encoder->base.crtc);
2361 enum dpio_channel ch = vlv_dport_to_channel(dport);
2362 enum pipe pipe = intel_crtc->pipe;
2363 u32 val;
2364
2365 intel_dp_link_down(intel_dp);
2366
2367 mutex_lock(&dev_priv->dpio_lock);
2368
2369 /* Propagate soft reset to data lane reset */
97fd4d5c 2370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2371 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2372 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2373
97fd4d5c
VS
2374 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2375 val |= CHV_PCS_REQ_SOFTRESET_EN;
2376 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2377
2378 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2379 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2380 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2381
2382 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2383 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2384 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2385
2386 mutex_unlock(&dev_priv->dpio_lock);
2387}
2388
7b13b58a
VS
2389static void
2390_intel_dp_set_link_train(struct intel_dp *intel_dp,
2391 uint32_t *DP,
2392 uint8_t dp_train_pat)
2393{
2394 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2395 struct drm_device *dev = intel_dig_port->base.base.dev;
2396 struct drm_i915_private *dev_priv = dev->dev_private;
2397 enum port port = intel_dig_port->port;
2398
2399 if (HAS_DDI(dev)) {
2400 uint32_t temp = I915_READ(DP_TP_CTL(port));
2401
2402 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2403 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2404 else
2405 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2406
2407 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2408 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2409 case DP_TRAINING_PATTERN_DISABLE:
2410 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2411
2412 break;
2413 case DP_TRAINING_PATTERN_1:
2414 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2415 break;
2416 case DP_TRAINING_PATTERN_2:
2417 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2418 break;
2419 case DP_TRAINING_PATTERN_3:
2420 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2421 break;
2422 }
2423 I915_WRITE(DP_TP_CTL(port), temp);
2424
2425 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2426 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2427
2428 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2429 case DP_TRAINING_PATTERN_DISABLE:
2430 *DP |= DP_LINK_TRAIN_OFF_CPT;
2431 break;
2432 case DP_TRAINING_PATTERN_1:
2433 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2434 break;
2435 case DP_TRAINING_PATTERN_2:
2436 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2437 break;
2438 case DP_TRAINING_PATTERN_3:
2439 DRM_ERROR("DP training pattern 3 not supported\n");
2440 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2441 break;
2442 }
2443
2444 } else {
2445 if (IS_CHERRYVIEW(dev))
2446 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2447 else
2448 *DP &= ~DP_LINK_TRAIN_MASK;
2449
2450 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2451 case DP_TRAINING_PATTERN_DISABLE:
2452 *DP |= DP_LINK_TRAIN_OFF;
2453 break;
2454 case DP_TRAINING_PATTERN_1:
2455 *DP |= DP_LINK_TRAIN_PAT_1;
2456 break;
2457 case DP_TRAINING_PATTERN_2:
2458 *DP |= DP_LINK_TRAIN_PAT_2;
2459 break;
2460 case DP_TRAINING_PATTERN_3:
2461 if (IS_CHERRYVIEW(dev)) {
2462 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2463 } else {
2464 DRM_ERROR("DP training pattern 3 not supported\n");
2465 *DP |= DP_LINK_TRAIN_PAT_2;
2466 }
2467 break;
2468 }
2469 }
2470}
2471
2472static void intel_dp_enable_port(struct intel_dp *intel_dp)
2473{
2474 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2475 struct drm_i915_private *dev_priv = dev->dev_private;
2476
7b13b58a
VS
2477 /* enable with pattern 1 (as per spec) */
2478 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2479 DP_TRAINING_PATTERN_1);
2480
2481 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2482 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2483
2484 /*
2485 * Magic for VLV/CHV. We _must_ first set up the register
2486 * without actually enabling the port, and then do another
2487 * write to enable the port. Otherwise link training will
2488 * fail when the power sequencer is freshly used for this port.
2489 */
2490 intel_dp->DP |= DP_PORT_EN;
2491
2492 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2493 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2494}
2495
e8cb4558 2496static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2497{
e8cb4558
DV
2498 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2499 struct drm_device *dev = encoder->base.dev;
2500 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2501 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2502 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2503 unsigned int lane_mask = 0x0;
5d613501 2504
0c33d8d7
DV
2505 if (WARN_ON(dp_reg & DP_PORT_EN))
2506 return;
5d613501 2507
093e3f13
VS
2508 pps_lock(intel_dp);
2509
2510 if (IS_VALLEYVIEW(dev))
2511 vlv_init_panel_power_sequencer(intel_dp);
2512
7b13b58a 2513 intel_dp_enable_port(intel_dp);
093e3f13
VS
2514
2515 edp_panel_vdd_on(intel_dp);
2516 edp_panel_on(intel_dp);
2517 edp_panel_vdd_off(intel_dp, true);
2518
2519 pps_unlock(intel_dp);
2520
61234fa5 2521 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2522 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2523 lane_mask);
61234fa5 2524
f01eca2e 2525 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2526 intel_dp_start_link_train(intel_dp);
33a34e4e 2527 intel_dp_complete_link_train(intel_dp);
3ab9c637 2528 intel_dp_stop_link_train(intel_dp);
c1dec79a 2529
6e3c9717 2530 if (crtc->config->has_audio) {
c1dec79a
JN
2531 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2532 pipe_name(crtc->pipe));
2533 intel_audio_codec_enable(encoder);
2534 }
ab1f90f9 2535}
89b667f8 2536
ecff4f3b
JN
2537static void g4x_enable_dp(struct intel_encoder *encoder)
2538{
828f5c6e
JN
2539 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2540
ecff4f3b 2541 intel_enable_dp(encoder);
4be73780 2542 intel_edp_backlight_on(intel_dp);
ab1f90f9 2543}
89b667f8 2544
ab1f90f9
JN
2545static void vlv_enable_dp(struct intel_encoder *encoder)
2546{
828f5c6e
JN
2547 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2548
4be73780 2549 intel_edp_backlight_on(intel_dp);
b32c6f48 2550 intel_psr_enable(intel_dp);
d240f20f
JB
2551}
2552
ecff4f3b 2553static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2554{
2555 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2556 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2557
8ac33ed3
DV
2558 intel_dp_prepare(encoder);
2559
d41f1efb
DV
2560 /* Only ilk+ has port A */
2561 if (dport->port == PORT_A) {
2562 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2563 ironlake_edp_pll_on(intel_dp);
d41f1efb 2564 }
ab1f90f9
JN
2565}
2566
83b84597
VS
2567static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2568{
2569 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2570 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2571 enum pipe pipe = intel_dp->pps_pipe;
2572 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2573
2574 edp_panel_vdd_off_sync(intel_dp);
2575
2576 /*
2577 * VLV seems to get confused when multiple power seqeuencers
2578 * have the same port selected (even if only one has power/vdd
2579 * enabled). The failure manifests as vlv_wait_port_ready() failing
2580 * CHV on the other hand doesn't seem to mind having the same port
2581 * selected in multiple power seqeuencers, but let's clear the
2582 * port select always when logically disconnecting a power sequencer
2583 * from a port.
2584 */
2585 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2586 pipe_name(pipe), port_name(intel_dig_port->port));
2587 I915_WRITE(pp_on_reg, 0);
2588 POSTING_READ(pp_on_reg);
2589
2590 intel_dp->pps_pipe = INVALID_PIPE;
2591}
2592
a4a5d2f8
VS
2593static void vlv_steal_power_sequencer(struct drm_device *dev,
2594 enum pipe pipe)
2595{
2596 struct drm_i915_private *dev_priv = dev->dev_private;
2597 struct intel_encoder *encoder;
2598
2599 lockdep_assert_held(&dev_priv->pps_mutex);
2600
ac3c12e4
VS
2601 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2602 return;
2603
a4a5d2f8
VS
2604 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2605 base.head) {
2606 struct intel_dp *intel_dp;
773538e8 2607 enum port port;
a4a5d2f8
VS
2608
2609 if (encoder->type != INTEL_OUTPUT_EDP)
2610 continue;
2611
2612 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2613 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2614
2615 if (intel_dp->pps_pipe != pipe)
2616 continue;
2617
2618 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2619 pipe_name(pipe), port_name(port));
a4a5d2f8 2620
034e43c6
VS
2621 WARN(encoder->connectors_active,
2622 "stealing pipe %c power sequencer from active eDP port %c\n",
2623 pipe_name(pipe), port_name(port));
a4a5d2f8 2624
a4a5d2f8 2625 /* make sure vdd is off before we steal it */
83b84597 2626 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2627 }
2628}
2629
2630static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2631{
2632 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2633 struct intel_encoder *encoder = &intel_dig_port->base;
2634 struct drm_device *dev = encoder->base.dev;
2635 struct drm_i915_private *dev_priv = dev->dev_private;
2636 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2637
2638 lockdep_assert_held(&dev_priv->pps_mutex);
2639
093e3f13
VS
2640 if (!is_edp(intel_dp))
2641 return;
2642
a4a5d2f8
VS
2643 if (intel_dp->pps_pipe == crtc->pipe)
2644 return;
2645
2646 /*
2647 * If another power sequencer was being used on this
2648 * port previously make sure to turn off vdd there while
2649 * we still have control of it.
2650 */
2651 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2652 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2653
2654 /*
2655 * We may be stealing the power
2656 * sequencer from another port.
2657 */
2658 vlv_steal_power_sequencer(dev, crtc->pipe);
2659
2660 /* now it's all ours */
2661 intel_dp->pps_pipe = crtc->pipe;
2662
2663 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2664 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2665
2666 /* init power sequencer on this pipe and port */
36b5f425
VS
2667 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2668 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2669}
2670
ab1f90f9 2671static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2672{
2bd2ad64 2673 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2674 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2675 struct drm_device *dev = encoder->base.dev;
89b667f8 2676 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2677 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2678 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2679 int pipe = intel_crtc->pipe;
2680 u32 val;
a4fc5ed6 2681
ab1f90f9 2682 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2683
ab3c759a 2684 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2685 val = 0;
2686 if (pipe)
2687 val |= (1<<21);
2688 else
2689 val &= ~(1<<21);
2690 val |= 0x001000c4;
ab3c759a
CML
2691 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2692 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2693 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2694
ab1f90f9
JN
2695 mutex_unlock(&dev_priv->dpio_lock);
2696
2697 intel_enable_dp(encoder);
89b667f8
JB
2698}
2699
ecff4f3b 2700static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2701{
2702 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2703 struct drm_device *dev = encoder->base.dev;
2704 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2705 struct intel_crtc *intel_crtc =
2706 to_intel_crtc(encoder->base.crtc);
e4607fcf 2707 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2708 int pipe = intel_crtc->pipe;
89b667f8 2709
8ac33ed3
DV
2710 intel_dp_prepare(encoder);
2711
89b667f8 2712 /* Program Tx lane resets to default */
0980a60f 2713 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2714 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2715 DPIO_PCS_TX_LANE2_RESET |
2716 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2717 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2718 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2719 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2720 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2721 DPIO_PCS_CLK_SOFT_RESET);
2722
2723 /* Fix up inter-pair skew failure */
ab3c759a
CML
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2725 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2726 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2727 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2728}
2729
e4a1d846
CML
2730static void chv_pre_enable_dp(struct intel_encoder *encoder)
2731{
2732 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2733 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2734 struct drm_device *dev = encoder->base.dev;
2735 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2736 struct intel_crtc *intel_crtc =
2737 to_intel_crtc(encoder->base.crtc);
2738 enum dpio_channel ch = vlv_dport_to_channel(dport);
2739 int pipe = intel_crtc->pipe;
2e523e98 2740 int data, i, stagger;
949c1d43 2741 u32 val;
e4a1d846 2742
e4a1d846 2743 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2744
570e2a74
VS
2745 /* allow hardware to manage TX FIFO reset source */
2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2747 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2748 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2749
2750 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2751 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2752 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2753
949c1d43 2754 /* Deassert soft data lane reset*/
97fd4d5c 2755 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2756 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2757 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2758
2759 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2760 val |= CHV_PCS_REQ_SOFTRESET_EN;
2761 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2762
2763 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2764 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2765 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2766
97fd4d5c 2767 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2768 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2769 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2770
2771 /* Program Tx lane latency optimal setting*/
e4a1d846 2772 for (i = 0; i < 4; i++) {
e4a1d846
CML
2773 /* Set the upar bit */
2774 data = (i == 1) ? 0x0 : 0x1;
2775 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2776 data << DPIO_UPAR_SHIFT);
2777 }
2778
2779 /* Data lane stagger programming */
2e523e98
VS
2780 if (intel_crtc->config->port_clock > 270000)
2781 stagger = 0x18;
2782 else if (intel_crtc->config->port_clock > 135000)
2783 stagger = 0xd;
2784 else if (intel_crtc->config->port_clock > 67500)
2785 stagger = 0x7;
2786 else if (intel_crtc->config->port_clock > 33750)
2787 stagger = 0x4;
2788 else
2789 stagger = 0x2;
2790
2791 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2792 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2794
2795 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2796 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2797 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2798
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2800 DPIO_LANESTAGGER_STRAP(stagger) |
2801 DPIO_LANESTAGGER_STRAP_OVRD |
2802 DPIO_TX1_STAGGER_MASK(0x1f) |
2803 DPIO_TX1_STAGGER_MULT(6) |
2804 DPIO_TX2_STAGGER_MULT(0));
2805
2806 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2807 DPIO_LANESTAGGER_STRAP(stagger) |
2808 DPIO_LANESTAGGER_STRAP_OVRD |
2809 DPIO_TX1_STAGGER_MASK(0x1f) |
2810 DPIO_TX1_STAGGER_MULT(7) |
2811 DPIO_TX2_STAGGER_MULT(5));
e4a1d846
CML
2812
2813 mutex_unlock(&dev_priv->dpio_lock);
2814
e4a1d846 2815 intel_enable_dp(encoder);
e4a1d846
CML
2816}
2817
9197c88b
VS
2818static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2819{
2820 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2821 struct drm_device *dev = encoder->base.dev;
2822 struct drm_i915_private *dev_priv = dev->dev_private;
2823 struct intel_crtc *intel_crtc =
2824 to_intel_crtc(encoder->base.crtc);
2825 enum dpio_channel ch = vlv_dport_to_channel(dport);
2826 enum pipe pipe = intel_crtc->pipe;
2827 u32 val;
2828
625695f8
VS
2829 intel_dp_prepare(encoder);
2830
9197c88b
VS
2831 mutex_lock(&dev_priv->dpio_lock);
2832
b9e5ac3c
VS
2833 /* program left/right clock distribution */
2834 if (pipe != PIPE_B) {
2835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2836 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2837 if (ch == DPIO_CH0)
2838 val |= CHV_BUFLEFTENA1_FORCE;
2839 if (ch == DPIO_CH1)
2840 val |= CHV_BUFRIGHTENA1_FORCE;
2841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2842 } else {
2843 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2844 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2845 if (ch == DPIO_CH0)
2846 val |= CHV_BUFLEFTENA2_FORCE;
2847 if (ch == DPIO_CH1)
2848 val |= CHV_BUFRIGHTENA2_FORCE;
2849 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2850 }
2851
9197c88b
VS
2852 /* program clock channel usage */
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2855 if (pipe != PIPE_B)
2856 val &= ~CHV_PCS_USEDCLKCHANNEL;
2857 else
2858 val |= CHV_PCS_USEDCLKCHANNEL;
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2860
2861 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2862 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2863 if (pipe != PIPE_B)
2864 val &= ~CHV_PCS_USEDCLKCHANNEL;
2865 else
2866 val |= CHV_PCS_USEDCLKCHANNEL;
2867 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2868
2869 /*
2870 * This a a bit weird since generally CL
2871 * matches the pipe, but here we need to
2872 * pick the CL based on the port.
2873 */
2874 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2875 if (pipe != PIPE_B)
2876 val &= ~CHV_CMN_USEDCLKCHANNEL;
2877 else
2878 val |= CHV_CMN_USEDCLKCHANNEL;
2879 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2880
2881 mutex_unlock(&dev_priv->dpio_lock);
2882}
2883
a4fc5ed6 2884/*
df0c237d
JB
2885 * Native read with retry for link status and receiver capability reads for
2886 * cases where the sink may still be asleep.
9d1a1031
JN
2887 *
2888 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2889 * supposed to retry 3 times per the spec.
a4fc5ed6 2890 */
9d1a1031
JN
2891static ssize_t
2892intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2893 void *buffer, size_t size)
a4fc5ed6 2894{
9d1a1031
JN
2895 ssize_t ret;
2896 int i;
61da5fab 2897
f6a19066
VS
2898 /*
2899 * Sometime we just get the same incorrect byte repeated
2900 * over the entire buffer. Doing just one throw away read
2901 * initially seems to "solve" it.
2902 */
2903 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2904
61da5fab 2905 for (i = 0; i < 3; i++) {
9d1a1031
JN
2906 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2907 if (ret == size)
2908 return ret;
61da5fab
JB
2909 msleep(1);
2910 }
a4fc5ed6 2911
9d1a1031 2912 return ret;
a4fc5ed6
KP
2913}
2914
2915/*
2916 * Fetch AUX CH registers 0x202 - 0x207 which contain
2917 * link status information
2918 */
2919static bool
93f62dad 2920intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2921{
9d1a1031
JN
2922 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2923 DP_LANE0_1_STATUS,
2924 link_status,
2925 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2926}
2927
1100244e 2928/* These are source-specific values. */
a4fc5ed6 2929static uint8_t
1a2eb460 2930intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2931{
30add22d 2932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2933 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2934 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2935
9314726b
VK
2936 if (IS_BROXTON(dev))
2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2938 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2939 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2941 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2942 } else if (IS_VALLEYVIEW(dev))
bd60018a 2943 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2944 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2945 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2946 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2947 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2948 else
bd60018a 2949 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2950}
2951
2952static uint8_t
2953intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2954{
30add22d 2955 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2956 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2957
5a9d1f1a
DL
2958 if (INTEL_INFO(dev)->gen >= 9) {
2959 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2960 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2961 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2963 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2964 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2965 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2967 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2968 default:
2969 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2970 }
2971 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2972 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2974 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2976 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2978 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2980 default:
bd60018a 2981 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2982 }
e2fa6fba
P
2983 } else if (IS_VALLEYVIEW(dev)) {
2984 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2986 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2988 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2990 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2992 default:
bd60018a 2993 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2994 }
bc7d38a4 2995 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2996 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2998 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2999 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3000 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3001 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3002 default:
bd60018a 3003 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3004 }
3005 } else {
3006 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3008 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3009 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3010 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3014 default:
bd60018a 3015 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3016 }
a4fc5ed6
KP
3017 }
3018}
3019
5829975c 3020static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3021{
3022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3023 struct drm_i915_private *dev_priv = dev->dev_private;
3024 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3025 struct intel_crtc *intel_crtc =
3026 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3027 unsigned long demph_reg_value, preemph_reg_value,
3028 uniqtranscale_reg_value;
3029 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3030 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3031 int pipe = intel_crtc->pipe;
e2fa6fba
P
3032
3033 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3034 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3035 preemph_reg_value = 0x0004000;
3036 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3038 demph_reg_value = 0x2B405555;
3039 uniqtranscale_reg_value = 0x552AB83A;
3040 break;
bd60018a 3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3042 demph_reg_value = 0x2B404040;
3043 uniqtranscale_reg_value = 0x5548B83A;
3044 break;
bd60018a 3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3046 demph_reg_value = 0x2B245555;
3047 uniqtranscale_reg_value = 0x5560B83A;
3048 break;
bd60018a 3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3050 demph_reg_value = 0x2B405555;
3051 uniqtranscale_reg_value = 0x5598DA3A;
3052 break;
3053 default:
3054 return 0;
3055 }
3056 break;
bd60018a 3057 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3058 preemph_reg_value = 0x0002000;
3059 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3061 demph_reg_value = 0x2B404040;
3062 uniqtranscale_reg_value = 0x5552B83A;
3063 break;
bd60018a 3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3065 demph_reg_value = 0x2B404848;
3066 uniqtranscale_reg_value = 0x5580B83A;
3067 break;
bd60018a 3068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3069 demph_reg_value = 0x2B404040;
3070 uniqtranscale_reg_value = 0x55ADDA3A;
3071 break;
3072 default:
3073 return 0;
3074 }
3075 break;
bd60018a 3076 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3077 preemph_reg_value = 0x0000000;
3078 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3080 demph_reg_value = 0x2B305555;
3081 uniqtranscale_reg_value = 0x5570B83A;
3082 break;
bd60018a 3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3084 demph_reg_value = 0x2B2B4040;
3085 uniqtranscale_reg_value = 0x55ADDA3A;
3086 break;
3087 default:
3088 return 0;
3089 }
3090 break;
bd60018a 3091 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3092 preemph_reg_value = 0x0006000;
3093 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3094 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3095 demph_reg_value = 0x1B405555;
3096 uniqtranscale_reg_value = 0x55ADDA3A;
3097 break;
3098 default:
3099 return 0;
3100 }
3101 break;
3102 default:
3103 return 0;
3104 }
3105
0980a60f 3106 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3107 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3109 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3110 uniqtranscale_reg_value);
ab3c759a
CML
3111 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3112 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3113 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3114 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3115 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3116
3117 return 0;
3118}
3119
5829975c 3120static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3121{
3122 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3123 struct drm_i915_private *dev_priv = dev->dev_private;
3124 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3125 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3126 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3127 uint8_t train_set = intel_dp->train_set[0];
3128 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3129 enum pipe pipe = intel_crtc->pipe;
3130 int i;
e4a1d846
CML
3131
3132 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3133 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3134 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3136 deemph_reg_value = 128;
3137 margin_reg_value = 52;
3138 break;
bd60018a 3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3140 deemph_reg_value = 128;
3141 margin_reg_value = 77;
3142 break;
bd60018a 3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3144 deemph_reg_value = 128;
3145 margin_reg_value = 102;
3146 break;
bd60018a 3147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3148 deemph_reg_value = 128;
3149 margin_reg_value = 154;
3150 /* FIXME extra to set for 1200 */
3151 break;
3152 default:
3153 return 0;
3154 }
3155 break;
bd60018a 3156 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3157 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3159 deemph_reg_value = 85;
3160 margin_reg_value = 78;
3161 break;
bd60018a 3162 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3163 deemph_reg_value = 85;
3164 margin_reg_value = 116;
3165 break;
bd60018a 3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3167 deemph_reg_value = 85;
3168 margin_reg_value = 154;
3169 break;
3170 default:
3171 return 0;
3172 }
3173 break;
bd60018a 3174 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3175 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3176 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3177 deemph_reg_value = 64;
3178 margin_reg_value = 104;
3179 break;
bd60018a 3180 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3181 deemph_reg_value = 64;
3182 margin_reg_value = 154;
3183 break;
3184 default:
3185 return 0;
3186 }
3187 break;
bd60018a 3188 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3189 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3191 deemph_reg_value = 43;
3192 margin_reg_value = 154;
3193 break;
3194 default:
3195 return 0;
3196 }
3197 break;
3198 default:
3199 return 0;
3200 }
3201
3202 mutex_lock(&dev_priv->dpio_lock);
3203
3204 /* Clear calc init */
1966e59e
VS
3205 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3206 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3207 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3208 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3209 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3210
3211 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3212 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3213 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3214 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3215 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3216
a02ef3c7
VS
3217 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3218 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3219 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3220 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3221
3222 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3223 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3224 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3225 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3226
e4a1d846 3227 /* Program swing deemph */
f72df8db
VS
3228 for (i = 0; i < 4; i++) {
3229 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3230 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3231 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3232 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3233 }
e4a1d846
CML
3234
3235 /* Program swing margin */
f72df8db
VS
3236 for (i = 0; i < 4; i++) {
3237 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3238 val &= ~DPIO_SWING_MARGIN000_MASK;
3239 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3240 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3241 }
e4a1d846
CML
3242
3243 /* Disable unique transition scale */
f72df8db
VS
3244 for (i = 0; i < 4; i++) {
3245 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3246 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3247 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3248 }
e4a1d846
CML
3249
3250 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3251 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3252 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3253 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3254
3255 /*
3256 * The document said it needs to set bit 27 for ch0 and bit 26
3257 * for ch1. Might be a typo in the doc.
3258 * For now, for this unique transition scale selection, set bit
3259 * 27 for ch0 and ch1.
3260 */
f72df8db
VS
3261 for (i = 0; i < 4; i++) {
3262 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3263 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3264 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3265 }
e4a1d846 3266
f72df8db
VS
3267 for (i = 0; i < 4; i++) {
3268 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3269 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3270 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3271 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3272 }
e4a1d846
CML
3273 }
3274
3275 /* Start swing calculation */
1966e59e
VS
3276 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3277 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3278 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3279
3280 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3281 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3282 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3283
3284 /* LRC Bypass */
3285 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3286 val |= DPIO_LRC_BYPASS;
3287 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3288
3289 mutex_unlock(&dev_priv->dpio_lock);
3290
3291 return 0;
3292}
3293
a4fc5ed6 3294static void
0301b3ac
JN
3295intel_get_adjust_train(struct intel_dp *intel_dp,
3296 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3297{
3298 uint8_t v = 0;
3299 uint8_t p = 0;
3300 int lane;
1a2eb460
KP
3301 uint8_t voltage_max;
3302 uint8_t preemph_max;
a4fc5ed6 3303
33a34e4e 3304 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3305 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3306 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3307
3308 if (this_v > v)
3309 v = this_v;
3310 if (this_p > p)
3311 p = this_p;
3312 }
3313
1a2eb460 3314 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3315 if (v >= voltage_max)
3316 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3317
1a2eb460
KP
3318 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3319 if (p >= preemph_max)
3320 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3321
3322 for (lane = 0; lane < 4; lane++)
33a34e4e 3323 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3324}
3325
3326static uint32_t
5829975c 3327gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3328{
3cf2efb1 3329 uint32_t signal_levels = 0;
a4fc5ed6 3330
3cf2efb1 3331 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3333 default:
3334 signal_levels |= DP_VOLTAGE_0_4;
3335 break;
bd60018a 3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3337 signal_levels |= DP_VOLTAGE_0_6;
3338 break;
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3340 signal_levels |= DP_VOLTAGE_0_8;
3341 break;
bd60018a 3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3343 signal_levels |= DP_VOLTAGE_1_2;
3344 break;
3345 }
3cf2efb1 3346 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3347 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3348 default:
3349 signal_levels |= DP_PRE_EMPHASIS_0;
3350 break;
bd60018a 3351 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3352 signal_levels |= DP_PRE_EMPHASIS_3_5;
3353 break;
bd60018a 3354 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3355 signal_levels |= DP_PRE_EMPHASIS_6;
3356 break;
bd60018a 3357 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3358 signal_levels |= DP_PRE_EMPHASIS_9_5;
3359 break;
3360 }
3361 return signal_levels;
3362}
3363
e3421a18
ZW
3364/* Gen6's DP voltage swing and pre-emphasis control */
3365static uint32_t
5829975c 3366gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3367{
3c5a62b5
YL
3368 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3369 DP_TRAIN_PRE_EMPHASIS_MASK);
3370 switch (signal_levels) {
bd60018a
SJ
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3373 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3375 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3378 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3381 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3384 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3385 default:
3c5a62b5
YL
3386 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3387 "0x%x\n", signal_levels);
3388 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3389 }
3390}
3391
1a2eb460
KP
3392/* Gen7's DP voltage swing and pre-emphasis control */
3393static uint32_t
5829975c 3394gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3395{
3396 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3397 DP_TRAIN_PRE_EMPHASIS_MASK);
3398 switch (signal_levels) {
bd60018a 3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3400 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3401 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3402 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3404 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3405
bd60018a 3406 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3407 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3409 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3410
bd60018a 3411 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3412 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3413 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3414 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3415
3416 default:
3417 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3418 "0x%x\n", signal_levels);
3419 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3420 }
3421}
3422
d6c0d722
PZ
3423/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3424static uint32_t
5829975c 3425hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3426{
d6c0d722
PZ
3427 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3428 DP_TRAIN_PRE_EMPHASIS_MASK);
3429 switch (signal_levels) {
bd60018a 3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3431 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3433 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3435 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3437 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3438
bd60018a 3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3440 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3441 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3442 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3444 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3445
bd60018a 3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3447 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3449 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3450
3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3452 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3453 default:
3454 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3455 "0x%x\n", signal_levels);
c5fe6a06 3456 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3457 }
a4fc5ed6
KP
3458}
3459
5829975c 3460static void bxt_signal_levels(struct intel_dp *intel_dp)
96fb9f9b
VK
3461{
3462 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3463 enum port port = dport->port;
3464 struct drm_device *dev = dport->base.base.dev;
3465 struct intel_encoder *encoder = &dport->base;
3466 uint8_t train_set = intel_dp->train_set[0];
3467 uint32_t level = 0;
3468
3469 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3470 DP_TRAIN_PRE_EMPHASIS_MASK);
3471 switch (signal_levels) {
3472 default:
3473 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3475 level = 0;
3476 break;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3478 level = 1;
3479 break;
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3481 level = 2;
3482 break;
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3484 level = 3;
3485 break;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3487 level = 4;
3488 break;
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3490 level = 5;
3491 break;
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3493 level = 6;
3494 break;
3495 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3496 level = 7;
3497 break;
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3499 level = 8;
3500 break;
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3502 level = 9;
3503 break;
3504 }
3505
3506 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3507}
3508
f0a3424e
PZ
3509/* Properly updates "DP" with the correct signal levels. */
3510static void
3511intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3512{
3513 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3514 enum port port = intel_dig_port->port;
f0a3424e
PZ
3515 struct drm_device *dev = intel_dig_port->base.base.dev;
3516 uint32_t signal_levels, mask;
3517 uint8_t train_set = intel_dp->train_set[0];
3518
96fb9f9b
VK
3519 if (IS_BROXTON(dev)) {
3520 signal_levels = 0;
5829975c 3521 bxt_signal_levels(intel_dp);
96fb9f9b
VK
3522 mask = 0;
3523 } else if (HAS_DDI(dev)) {
5829975c 3524 signal_levels = hsw_signal_levels(train_set);
f0a3424e 3525 mask = DDI_BUF_EMP_MASK;
e4a1d846 3526 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3527 signal_levels = chv_signal_levels(intel_dp);
e4a1d846 3528 mask = 0;
e2fa6fba 3529 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3530 signal_levels = vlv_signal_levels(intel_dp);
e2fa6fba 3531 mask = 0;
bc7d38a4 3532 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3533 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3534 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3535 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3536 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3537 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3538 } else {
5829975c 3539 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3540 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3541 }
3542
96fb9f9b
VK
3543 if (mask)
3544 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3545
3546 DRM_DEBUG_KMS("Using vswing level %d\n",
3547 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3548 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3549 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3550 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3551
3552 *DP = (*DP & ~mask) | signal_levels;
3553}
3554
a4fc5ed6 3555static bool
ea5b213a 3556intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3557 uint32_t *DP,
58e10eb9 3558 uint8_t dp_train_pat)
a4fc5ed6 3559{
174edf1f
PZ
3560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3561 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3562 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3563 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3564 int ret, len;
a4fc5ed6 3565
7b13b58a 3566 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3567
70aff66c 3568 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3569 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3570
2cdfe6c8
JN
3571 buf[0] = dp_train_pat;
3572 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3573 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3574 /* don't write DP_TRAINING_LANEx_SET on disable */
3575 len = 1;
3576 } else {
3577 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3578 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3579 len = intel_dp->lane_count + 1;
47ea7542 3580 }
a4fc5ed6 3581
9d1a1031
JN
3582 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3583 buf, len);
2cdfe6c8
JN
3584
3585 return ret == len;
a4fc5ed6
KP
3586}
3587
70aff66c
JN
3588static bool
3589intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3590 uint8_t dp_train_pat)
3591{
4e96c977
MK
3592 if (!intel_dp->train_set_valid)
3593 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3594 intel_dp_set_signal_levels(intel_dp, DP);
3595 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3596}
3597
3598static bool
3599intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3600 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3601{
3602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3603 struct drm_device *dev = intel_dig_port->base.base.dev;
3604 struct drm_i915_private *dev_priv = dev->dev_private;
3605 int ret;
3606
3607 intel_get_adjust_train(intel_dp, link_status);
3608 intel_dp_set_signal_levels(intel_dp, DP);
3609
3610 I915_WRITE(intel_dp->output_reg, *DP);
3611 POSTING_READ(intel_dp->output_reg);
3612
9d1a1031
JN
3613 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3614 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3615
3616 return ret == intel_dp->lane_count;
3617}
3618
3ab9c637
ID
3619static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3620{
3621 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3622 struct drm_device *dev = intel_dig_port->base.base.dev;
3623 struct drm_i915_private *dev_priv = dev->dev_private;
3624 enum port port = intel_dig_port->port;
3625 uint32_t val;
3626
3627 if (!HAS_DDI(dev))
3628 return;
3629
3630 val = I915_READ(DP_TP_CTL(port));
3631 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3632 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3633 I915_WRITE(DP_TP_CTL(port), val);
3634
3635 /*
3636 * On PORT_A we can have only eDP in SST mode. There the only reason
3637 * we need to set idle transmission mode is to work around a HW issue
3638 * where we enable the pipe while not in idle link-training mode.
3639 * In this case there is requirement to wait for a minimum number of
3640 * idle patterns to be sent.
3641 */
3642 if (port == PORT_A)
3643 return;
3644
3645 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3646 1))
3647 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3648}
3649
33a34e4e 3650/* Enable corresponding port and start training pattern 1 */
c19b0669 3651void
33a34e4e 3652intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3653{
da63a9f2 3654 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3655 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3656 int i;
3657 uint8_t voltage;
cdb0e95b 3658 int voltage_tries, loop_tries;
ea5b213a 3659 uint32_t DP = intel_dp->DP;
6aba5b6c 3660 uint8_t link_config[2];
a4fc5ed6 3661
affa9354 3662 if (HAS_DDI(dev))
c19b0669
PZ
3663 intel_ddi_prepare_link_retrain(encoder);
3664
3cf2efb1 3665 /* Write the link configuration data */
6aba5b6c
JN
3666 link_config[0] = intel_dp->link_bw;
3667 link_config[1] = intel_dp->lane_count;
3668 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3669 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3670 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3671 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3672 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3673 &intel_dp->rate_select, 1);
6aba5b6c
JN
3674
3675 link_config[0] = 0;
3676 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3677 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3678
3679 DP |= DP_PORT_EN;
1a2eb460 3680
70aff66c
JN
3681 /* clock recovery */
3682 if (!intel_dp_reset_link_train(intel_dp, &DP,
3683 DP_TRAINING_PATTERN_1 |
3684 DP_LINK_SCRAMBLING_DISABLE)) {
3685 DRM_ERROR("failed to enable link training\n");
3686 return;
3687 }
3688
a4fc5ed6 3689 voltage = 0xff;
cdb0e95b
KP
3690 voltage_tries = 0;
3691 loop_tries = 0;
a4fc5ed6 3692 for (;;) {
70aff66c 3693 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3694
a7c9655f 3695 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3696 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3697 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3698 break;
93f62dad 3699 }
a4fc5ed6 3700
01916270 3701 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3702 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3703 break;
3704 }
3705
4e96c977
MK
3706 /*
3707 * if we used previously trained voltage and pre-emphasis values
3708 * and we don't get clock recovery, reset link training values
3709 */
3710 if (intel_dp->train_set_valid) {
3711 DRM_DEBUG_KMS("clock recovery not ok, reset");
3712 /* clear the flag as we are not reusing train set */
3713 intel_dp->train_set_valid = false;
3714 if (!intel_dp_reset_link_train(intel_dp, &DP,
3715 DP_TRAINING_PATTERN_1 |
3716 DP_LINK_SCRAMBLING_DISABLE)) {
3717 DRM_ERROR("failed to enable link training\n");
3718 return;
3719 }
3720 continue;
3721 }
3722
3cf2efb1
CW
3723 /* Check to see if we've tried the max voltage */
3724 for (i = 0; i < intel_dp->lane_count; i++)
3725 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3726 break;
3b4f819d 3727 if (i == intel_dp->lane_count) {
b06fbda3
DV
3728 ++loop_tries;
3729 if (loop_tries == 5) {
3def84b3 3730 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3731 break;
3732 }
70aff66c
JN
3733 intel_dp_reset_link_train(intel_dp, &DP,
3734 DP_TRAINING_PATTERN_1 |
3735 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3736 voltage_tries = 0;
3737 continue;
3738 }
a4fc5ed6 3739
3cf2efb1 3740 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3741 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3742 ++voltage_tries;
b06fbda3 3743 if (voltage_tries == 5) {
3def84b3 3744 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3745 break;
3746 }
3747 } else
3748 voltage_tries = 0;
3749 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3750
70aff66c
JN
3751 /* Update training set as requested by target */
3752 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3753 DRM_ERROR("failed to update link training\n");
3754 break;
3755 }
a4fc5ed6
KP
3756 }
3757
33a34e4e
JB
3758 intel_dp->DP = DP;
3759}
3760
c19b0669 3761void
33a34e4e
JB
3762intel_dp_complete_link_train(struct intel_dp *intel_dp)
3763{
33a34e4e 3764 bool channel_eq = false;
37f80975 3765 int tries, cr_tries;
33a34e4e 3766 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3767 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3768
3769 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3770 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3771 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3772
a4fc5ed6 3773 /* channel equalization */
70aff66c 3774 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3775 training_pattern |
70aff66c
JN
3776 DP_LINK_SCRAMBLING_DISABLE)) {
3777 DRM_ERROR("failed to start channel equalization\n");
3778 return;
3779 }
3780
a4fc5ed6 3781 tries = 0;
37f80975 3782 cr_tries = 0;
a4fc5ed6
KP
3783 channel_eq = false;
3784 for (;;) {
70aff66c 3785 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3786
37f80975
JB
3787 if (cr_tries > 5) {
3788 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3789 break;
3790 }
3791
a7c9655f 3792 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3793 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3794 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3795 break;
70aff66c 3796 }
a4fc5ed6 3797
37f80975 3798 /* Make sure clock is still ok */
01916270 3799 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3800 intel_dp->train_set_valid = false;
37f80975 3801 intel_dp_start_link_train(intel_dp);
70aff66c 3802 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3803 training_pattern |
70aff66c 3804 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3805 cr_tries++;
3806 continue;
3807 }
3808
1ffdff13 3809 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3810 channel_eq = true;
3811 break;
3812 }
a4fc5ed6 3813
37f80975
JB
3814 /* Try 5 times, then try clock recovery if that fails */
3815 if (tries > 5) {
4e96c977 3816 intel_dp->train_set_valid = false;
37f80975 3817 intel_dp_start_link_train(intel_dp);
70aff66c 3818 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3819 training_pattern |
70aff66c 3820 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3821 tries = 0;
3822 cr_tries++;
3823 continue;
3824 }
a4fc5ed6 3825
70aff66c
JN
3826 /* Update training set as requested by target */
3827 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3828 DRM_ERROR("failed to update link training\n");
3829 break;
3830 }
3cf2efb1 3831 ++tries;
869184a6 3832 }
3cf2efb1 3833
3ab9c637
ID
3834 intel_dp_set_idle_link_train(intel_dp);
3835
3836 intel_dp->DP = DP;
3837
4e96c977 3838 if (channel_eq) {
5fa836a9 3839 intel_dp->train_set_valid = true;
07f42258 3840 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3841 }
3ab9c637
ID
3842}
3843
3844void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3845{
70aff66c 3846 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3847 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3848}
3849
3850static void
ea5b213a 3851intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3852{
da63a9f2 3853 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3854 enum port port = intel_dig_port->port;
da63a9f2 3855 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3856 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3857 uint32_t DP = intel_dp->DP;
a4fc5ed6 3858
bc76e320 3859 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3860 return;
3861
0c33d8d7 3862 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3863 return;
3864
28c97730 3865 DRM_DEBUG_KMS("\n");
32f9d658 3866
bc7d38a4 3867 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3868 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3869 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3870 } else {
aad3d14d
VS
3871 if (IS_CHERRYVIEW(dev))
3872 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3873 else
3874 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3875 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3876 }
fe255d00 3877 POSTING_READ(intel_dp->output_reg);
5eb08b69 3878
493a7081 3879 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3880 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3881 /* Hardware workaround: leaving our transcoder select
3882 * set to transcoder B while it's off will prevent the
3883 * corresponding HDMI output on transcoder A.
3884 *
3885 * Combine this with another hardware workaround:
3886 * transcoder select bit can only be cleared while the
3887 * port is enabled.
3888 */
3889 DP &= ~DP_PIPEB_SELECT;
3890 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3891 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3892 }
3893
832afda6 3894 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3895 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3896 POSTING_READ(intel_dp->output_reg);
f01eca2e 3897 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3898}
3899
26d61aad
KP
3900static bool
3901intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3902{
a031d709
RV
3903 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3904 struct drm_device *dev = dig_port->base.base.dev;
3905 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3906 uint8_t rev;
a031d709 3907
9d1a1031
JN
3908 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3909 sizeof(intel_dp->dpcd)) < 0)
edb39244 3910 return false; /* aux transfer failed */
92fd8fd1 3911
a8e98153 3912 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3913
edb39244
AJ
3914 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3915 return false; /* DPCD not present */
3916
2293bb5c
SK
3917 /* Check if the panel supports PSR */
3918 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3919 if (is_edp(intel_dp)) {
9d1a1031
JN
3920 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3921 intel_dp->psr_dpcd,
3922 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3923 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3924 dev_priv->psr.sink_support = true;
50003939 3925 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3926 }
474d1ec4
SJ
3927
3928 if (INTEL_INFO(dev)->gen >= 9 &&
3929 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3930 uint8_t frame_sync_cap;
3931
3932 dev_priv->psr.sink_support = true;
3933 intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3935 &frame_sync_cap, 1);
3936 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3937 /* PSR2 needs frame sync as well */
3938 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3939 DRM_DEBUG_KMS("PSR2 %s on sink",
3940 dev_priv->psr.psr2_support ? "supported" : "not supported");
3941 }
50003939
JN
3942 }
3943
7809a611 3944 /* Training Pattern 3 support, both source and sink */
06ea66b6 3945 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3946 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3947 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3948 intel_dp->use_tps3 = true;
f8d8a672 3949 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3950 } else
3951 intel_dp->use_tps3 = false;
3952
fc0f8e25
SJ
3953 /* Intermediate frequency support */
3954 if (is_edp(intel_dp) &&
3955 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3956 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3957 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3958 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3959 int i;
3960
fc0f8e25
SJ
3961 intel_dp_dpcd_read_wake(&intel_dp->aux,
3962 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3963 sink_rates,
3964 sizeof(sink_rates));
ea2d8a42 3965
94ca719e
VS
3966 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3967 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3968
3969 if (val == 0)
3970 break;
3971
af77b974
SJ
3972 /* Value read is in kHz while drm clock is saved in deca-kHz */
3973 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3974 }
94ca719e 3975 intel_dp->num_sink_rates = i;
fc0f8e25 3976 }
0336400e
VS
3977
3978 intel_dp_print_rates(intel_dp);
3979
edb39244
AJ
3980 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3981 DP_DWN_STRM_PORT_PRESENT))
3982 return true; /* native DP sink */
3983
3984 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3985 return true; /* no per-port downstream info */
3986
9d1a1031
JN
3987 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3988 intel_dp->downstream_ports,
3989 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3990 return false; /* downstream port status fetch failed */
3991
3992 return true;
92fd8fd1
KP
3993}
3994
0d198328
AJ
3995static void
3996intel_dp_probe_oui(struct intel_dp *intel_dp)
3997{
3998 u8 buf[3];
3999
4000 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4001 return;
4002
9d1a1031 4003 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4004 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4005 buf[0], buf[1], buf[2]);
4006
9d1a1031 4007 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4008 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4009 buf[0], buf[1], buf[2]);
4010}
4011
0e32b39c
DA
4012static bool
4013intel_dp_probe_mst(struct intel_dp *intel_dp)
4014{
4015 u8 buf[1];
4016
4017 if (!intel_dp->can_mst)
4018 return false;
4019
4020 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4021 return false;
4022
0e32b39c
DA
4023 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4024 if (buf[0] & DP_MST_CAP) {
4025 DRM_DEBUG_KMS("Sink is MST capable\n");
4026 intel_dp->is_mst = true;
4027 } else {
4028 DRM_DEBUG_KMS("Sink is not MST capable\n");
4029 intel_dp->is_mst = false;
4030 }
4031 }
0e32b39c
DA
4032
4033 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4034 return intel_dp->is_mst;
4035}
4036
d2e216d0
RV
4037int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4038{
4039 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4040 struct drm_device *dev = intel_dig_port->base.base.dev;
4041 struct intel_crtc *intel_crtc =
4042 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
4043 u8 buf;
4044 int test_crc_count;
4045 int attempts = 6;
d2e216d0 4046
ad9dc91b 4047 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 4048 return -EIO;
d2e216d0 4049
ad9dc91b 4050 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
4051 return -ENOTTY;
4052
1dda5f93
RV
4053 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4054 return -EIO;
4055
9d1a1031 4056 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 4057 buf | DP_TEST_SINK_START) < 0)
bda0381e 4058 return -EIO;
d2e216d0 4059
1dda5f93 4060 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 4061 return -EIO;
ad9dc91b 4062 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4063
ad9dc91b 4064 do {
1dda5f93
RV
4065 if (drm_dp_dpcd_readb(&intel_dp->aux,
4066 DP_TEST_SINK_MISC, &buf) < 0)
4067 return -EIO;
ad9dc91b
RV
4068 intel_wait_for_vblank(dev, intel_crtc->pipe);
4069 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4070
4071 if (attempts == 0) {
90bd1f46
DV
4072 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4073 return -ETIMEDOUT;
ad9dc91b 4074 }
d2e216d0 4075
9d1a1031 4076 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 4077 return -EIO;
d2e216d0 4078
1dda5f93
RV
4079 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4080 return -EIO;
4081 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4082 buf & ~DP_TEST_SINK_START) < 0)
4083 return -EIO;
ce31d9f4 4084
d2e216d0
RV
4085 return 0;
4086}
4087
a60f0e38
JB
4088static bool
4089intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4090{
9d1a1031
JN
4091 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4092 DP_DEVICE_SERVICE_IRQ_VECTOR,
4093 sink_irq_vector, 1) == 1;
a60f0e38
JB
4094}
4095
0e32b39c
DA
4096static bool
4097intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4098{
4099 int ret;
4100
4101 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4102 DP_SINK_COUNT_ESI,
4103 sink_irq_vector, 14);
4104 if (ret != 14)
4105 return false;
4106
4107 return true;
4108}
4109
c5d5ab7a
TP
4110static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4111{
4112 uint8_t test_result = DP_TEST_ACK;
4113 return test_result;
4114}
4115
4116static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4117{
4118 uint8_t test_result = DP_TEST_NAK;
4119 return test_result;
4120}
4121
4122static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4123{
c5d5ab7a 4124 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4125 struct intel_connector *intel_connector = intel_dp->attached_connector;
4126 struct drm_connector *connector = &intel_connector->base;
4127
4128 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4129 connector->edid_corrupt ||
559be30c
TP
4130 intel_dp->aux.i2c_defer_count > 6) {
4131 /* Check EDID read for NACKs, DEFERs and corruption
4132 * (DP CTS 1.2 Core r1.1)
4133 * 4.2.2.4 : Failed EDID read, I2C_NAK
4134 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4135 * 4.2.2.6 : EDID corruption detected
4136 * Use failsafe mode for all cases
4137 */
4138 if (intel_dp->aux.i2c_nack_count > 0 ||
4139 intel_dp->aux.i2c_defer_count > 0)
4140 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4141 intel_dp->aux.i2c_nack_count,
4142 intel_dp->aux.i2c_defer_count);
4143 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4144 } else {
4145 if (!drm_dp_dpcd_write(&intel_dp->aux,
4146 DP_TEST_EDID_CHECKSUM,
4147 &intel_connector->detect_edid->checksum,
5a1cc655 4148 1))
559be30c
TP
4149 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4150
4151 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4153 }
4154
4155 /* Set test active flag here so userspace doesn't interrupt things */
4156 intel_dp->compliance_test_active = 1;
4157
c5d5ab7a
TP
4158 return test_result;
4159}
4160
4161static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4162{
c5d5ab7a
TP
4163 uint8_t test_result = DP_TEST_NAK;
4164 return test_result;
4165}
4166
4167static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4168{
4169 uint8_t response = DP_TEST_NAK;
4170 uint8_t rxdata = 0;
4171 int status = 0;
4172
559be30c 4173 intel_dp->compliance_test_active = 0;
c5d5ab7a 4174 intel_dp->compliance_test_type = 0;
559be30c
TP
4175 intel_dp->compliance_test_data = 0;
4176
c5d5ab7a
TP
4177 intel_dp->aux.i2c_nack_count = 0;
4178 intel_dp->aux.i2c_defer_count = 0;
4179
4180 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4181 if (status <= 0) {
4182 DRM_DEBUG_KMS("Could not read test request from sink\n");
4183 goto update_status;
4184 }
4185
4186 switch (rxdata) {
4187 case DP_TEST_LINK_TRAINING:
4188 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4189 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4190 response = intel_dp_autotest_link_training(intel_dp);
4191 break;
4192 case DP_TEST_LINK_VIDEO_PATTERN:
4193 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4194 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4195 response = intel_dp_autotest_video_pattern(intel_dp);
4196 break;
4197 case DP_TEST_LINK_EDID_READ:
4198 DRM_DEBUG_KMS("EDID test requested\n");
4199 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4200 response = intel_dp_autotest_edid(intel_dp);
4201 break;
4202 case DP_TEST_LINK_PHY_TEST_PATTERN:
4203 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4204 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4205 response = intel_dp_autotest_phy_pattern(intel_dp);
4206 break;
4207 default:
4208 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4209 break;
4210 }
4211
4212update_status:
4213 status = drm_dp_dpcd_write(&intel_dp->aux,
4214 DP_TEST_RESPONSE,
4215 &response, 1);
4216 if (status <= 0)
4217 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4218}
4219
0e32b39c
DA
4220static int
4221intel_dp_check_mst_status(struct intel_dp *intel_dp)
4222{
4223 bool bret;
4224
4225 if (intel_dp->is_mst) {
4226 u8 esi[16] = { 0 };
4227 int ret = 0;
4228 int retry;
4229 bool handled;
4230 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4231go_again:
4232 if (bret == true) {
4233
4234 /* check link status - esi[10] = 0x200c */
4235 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4236 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4237 intel_dp_start_link_train(intel_dp);
4238 intel_dp_complete_link_train(intel_dp);
4239 intel_dp_stop_link_train(intel_dp);
4240 }
4241
6f34cc39 4242 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4243 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4244
4245 if (handled) {
4246 for (retry = 0; retry < 3; retry++) {
4247 int wret;
4248 wret = drm_dp_dpcd_write(&intel_dp->aux,
4249 DP_SINK_COUNT_ESI+1,
4250 &esi[1], 3);
4251 if (wret == 3) {
4252 break;
4253 }
4254 }
4255
4256 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4257 if (bret == true) {
6f34cc39 4258 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4259 goto go_again;
4260 }
4261 } else
4262 ret = 0;
4263
4264 return ret;
4265 } else {
4266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4267 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4268 intel_dp->is_mst = false;
4269 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4270 /* send a hotplug event */
4271 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4272 }
4273 }
4274 return -EINVAL;
4275}
4276
a4fc5ed6
KP
4277/*
4278 * According to DP spec
4279 * 5.1.2:
4280 * 1. Read DPCD
4281 * 2. Configure link according to Receiver Capabilities
4282 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4283 * 4. Check link status on receipt of hot-plug interrupt
4284 */
a5146200 4285static void
ea5b213a 4286intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4287{
5b215bcf 4288 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4289 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4290 u8 sink_irq_vector;
93f62dad 4291 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4292
5b215bcf
DA
4293 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4294
da63a9f2 4295 if (!intel_encoder->connectors_active)
d2b996ac 4296 return;
59cd09e1 4297
da63a9f2 4298 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4299 return;
4300
1a125d8a
ID
4301 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4302 return;
4303
92fd8fd1 4304 /* Try to read receiver status if the link appears to be up */
93f62dad 4305 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4306 return;
4307 }
4308
92fd8fd1 4309 /* Now read the DPCD to see if it's actually running */
26d61aad 4310 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4311 return;
4312 }
4313
a60f0e38
JB
4314 /* Try to read the source of the interrupt */
4315 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4316 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4317 /* Clear interrupt source */
9d1a1031
JN
4318 drm_dp_dpcd_writeb(&intel_dp->aux,
4319 DP_DEVICE_SERVICE_IRQ_VECTOR,
4320 sink_irq_vector);
a60f0e38
JB
4321
4322 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4323 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4324 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4325 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4326 }
4327
1ffdff13 4328 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4329 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4330 intel_encoder->base.name);
33a34e4e
JB
4331 intel_dp_start_link_train(intel_dp);
4332 intel_dp_complete_link_train(intel_dp);
3ab9c637 4333 intel_dp_stop_link_train(intel_dp);
33a34e4e 4334 }
a4fc5ed6 4335}
a4fc5ed6 4336
caf9ab24 4337/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4338static enum drm_connector_status
26d61aad 4339intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4340{
caf9ab24 4341 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4342 uint8_t type;
4343
4344 if (!intel_dp_get_dpcd(intel_dp))
4345 return connector_status_disconnected;
4346
4347 /* if there's no downstream port, we're done */
4348 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4349 return connector_status_connected;
caf9ab24
AJ
4350
4351 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4352 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4353 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4354 uint8_t reg;
9d1a1031
JN
4355
4356 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4357 &reg, 1) < 0)
caf9ab24 4358 return connector_status_unknown;
9d1a1031 4359
23235177
AJ
4360 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4361 : connector_status_disconnected;
caf9ab24
AJ
4362 }
4363
4364 /* If no HPD, poke DDC gently */
0b99836f 4365 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4366 return connector_status_connected;
caf9ab24
AJ
4367
4368 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4369 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4370 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4371 if (type == DP_DS_PORT_TYPE_VGA ||
4372 type == DP_DS_PORT_TYPE_NON_EDID)
4373 return connector_status_unknown;
4374 } else {
4375 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4376 DP_DWN_STRM_PORT_TYPE_MASK;
4377 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4378 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4379 return connector_status_unknown;
4380 }
caf9ab24
AJ
4381
4382 /* Anything else is out of spec, warn and ignore */
4383 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4384 return connector_status_disconnected;
71ba9000
AJ
4385}
4386
d410b56d
CW
4387static enum drm_connector_status
4388edp_detect(struct intel_dp *intel_dp)
4389{
4390 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4391 enum drm_connector_status status;
4392
4393 status = intel_panel_detect(dev);
4394 if (status == connector_status_unknown)
4395 status = connector_status_connected;
4396
4397 return status;
4398}
4399
5eb08b69 4400static enum drm_connector_status
a9756bb5 4401ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4402{
30add22d 4403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4404 struct drm_i915_private *dev_priv = dev->dev_private;
4405 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4406
1b469639
DL
4407 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4408 return connector_status_disconnected;
4409
26d61aad 4410 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4411}
4412
2a592bec
DA
4413static int g4x_digital_port_connected(struct drm_device *dev,
4414 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4415{
a4fc5ed6 4416 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4417 uint32_t bit;
5eb08b69 4418
232a6ee9
TP
4419 if (IS_VALLEYVIEW(dev)) {
4420 switch (intel_dig_port->port) {
4421 case PORT_B:
4422 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4423 break;
4424 case PORT_C:
4425 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4426 break;
4427 case PORT_D:
4428 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4429 break;
4430 default:
2a592bec 4431 return -EINVAL;
232a6ee9
TP
4432 }
4433 } else {
4434 switch (intel_dig_port->port) {
4435 case PORT_B:
4436 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4437 break;
4438 case PORT_C:
4439 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4440 break;
4441 case PORT_D:
4442 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4443 break;
4444 default:
2a592bec 4445 return -EINVAL;
232a6ee9 4446 }
a4fc5ed6
KP
4447 }
4448
10f76a38 4449 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4450 return 0;
4451 return 1;
4452}
4453
4454static enum drm_connector_status
4455g4x_dp_detect(struct intel_dp *intel_dp)
4456{
4457 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4458 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4459 int ret;
4460
4461 /* Can't disconnect eDP, but you can close the lid... */
4462 if (is_edp(intel_dp)) {
4463 enum drm_connector_status status;
4464
4465 status = intel_panel_detect(dev);
4466 if (status == connector_status_unknown)
4467 status = connector_status_connected;
4468 return status;
4469 }
4470
4471 ret = g4x_digital_port_connected(dev, intel_dig_port);
4472 if (ret == -EINVAL)
4473 return connector_status_unknown;
4474 else if (ret == 0)
a4fc5ed6
KP
4475 return connector_status_disconnected;
4476
26d61aad 4477 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4478}
4479
8c241fef 4480static struct edid *
beb60608 4481intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4482{
beb60608 4483 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4484
9cd300e0
JN
4485 /* use cached edid if we have one */
4486 if (intel_connector->edid) {
9cd300e0
JN
4487 /* invalid edid */
4488 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4489 return NULL;
4490
55e9edeb 4491 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4492 } else
4493 return drm_get_edid(&intel_connector->base,
4494 &intel_dp->aux.ddc);
4495}
8c241fef 4496
beb60608
CW
4497static void
4498intel_dp_set_edid(struct intel_dp *intel_dp)
4499{
4500 struct intel_connector *intel_connector = intel_dp->attached_connector;
4501 struct edid *edid;
8c241fef 4502
beb60608
CW
4503 edid = intel_dp_get_edid(intel_dp);
4504 intel_connector->detect_edid = edid;
4505
4506 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4507 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4508 else
4509 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4510}
4511
beb60608
CW
4512static void
4513intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4514{
beb60608 4515 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4516
beb60608
CW
4517 kfree(intel_connector->detect_edid);
4518 intel_connector->detect_edid = NULL;
9cd300e0 4519
beb60608
CW
4520 intel_dp->has_audio = false;
4521}
d6f24d0f 4522
beb60608
CW
4523static enum intel_display_power_domain
4524intel_dp_power_get(struct intel_dp *dp)
4525{
4526 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4527 enum intel_display_power_domain power_domain;
4528
4529 power_domain = intel_display_port_power_domain(encoder);
4530 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4531
4532 return power_domain;
4533}
d6f24d0f 4534
beb60608
CW
4535static void
4536intel_dp_power_put(struct intel_dp *dp,
4537 enum intel_display_power_domain power_domain)
4538{
4539 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4540 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4541}
4542
a9756bb5
ZW
4543static enum drm_connector_status
4544intel_dp_detect(struct drm_connector *connector, bool force)
4545{
4546 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4547 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4548 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4549 struct drm_device *dev = connector->dev;
a9756bb5 4550 enum drm_connector_status status;
671dedd2 4551 enum intel_display_power_domain power_domain;
0e32b39c 4552 bool ret;
09b1eb13 4553 u8 sink_irq_vector;
a9756bb5 4554
164c8598 4555 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4556 connector->base.id, connector->name);
beb60608 4557 intel_dp_unset_edid(intel_dp);
164c8598 4558
0e32b39c
DA
4559 if (intel_dp->is_mst) {
4560 /* MST devices are disconnected from a monitor POV */
4561 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4562 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4563 return connector_status_disconnected;
0e32b39c
DA
4564 }
4565
beb60608 4566 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4567
d410b56d
CW
4568 /* Can't disconnect eDP, but you can close the lid... */
4569 if (is_edp(intel_dp))
4570 status = edp_detect(intel_dp);
4571 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4572 status = ironlake_dp_detect(intel_dp);
4573 else
4574 status = g4x_dp_detect(intel_dp);
4575 if (status != connector_status_connected)
c8c8fb33 4576 goto out;
a9756bb5 4577
0d198328
AJ
4578 intel_dp_probe_oui(intel_dp);
4579
0e32b39c
DA
4580 ret = intel_dp_probe_mst(intel_dp);
4581 if (ret) {
4582 /* if we are in MST mode then this connector
4583 won't appear connected or have anything with EDID on it */
4584 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4585 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4586 status = connector_status_disconnected;
4587 goto out;
4588 }
4589
beb60608 4590 intel_dp_set_edid(intel_dp);
a9756bb5 4591
d63885da
PZ
4592 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4593 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4594 status = connector_status_connected;
4595
09b1eb13
TP
4596 /* Try to read the source of the interrupt */
4597 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4598 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4599 /* Clear interrupt source */
4600 drm_dp_dpcd_writeb(&intel_dp->aux,
4601 DP_DEVICE_SERVICE_IRQ_VECTOR,
4602 sink_irq_vector);
4603
4604 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4605 intel_dp_handle_test_request(intel_dp);
4606 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4607 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4608 }
4609
c8c8fb33 4610out:
beb60608 4611 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4612 return status;
a4fc5ed6
KP
4613}
4614
beb60608
CW
4615static void
4616intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4617{
df0e9248 4618 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4619 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4620 enum intel_display_power_domain power_domain;
a4fc5ed6 4621
beb60608
CW
4622 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4623 connector->base.id, connector->name);
4624 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4625
beb60608
CW
4626 if (connector->status != connector_status_connected)
4627 return;
671dedd2 4628
beb60608
CW
4629 power_domain = intel_dp_power_get(intel_dp);
4630
4631 intel_dp_set_edid(intel_dp);
4632
4633 intel_dp_power_put(intel_dp, power_domain);
4634
4635 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4636 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4637}
4638
4639static int intel_dp_get_modes(struct drm_connector *connector)
4640{
4641 struct intel_connector *intel_connector = to_intel_connector(connector);
4642 struct edid *edid;
4643
4644 edid = intel_connector->detect_edid;
4645 if (edid) {
4646 int ret = intel_connector_update_modes(connector, edid);
4647 if (ret)
4648 return ret;
4649 }
32f9d658 4650
f8779fda 4651 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4652 if (is_edp(intel_attached_dp(connector)) &&
4653 intel_connector->panel.fixed_mode) {
f8779fda 4654 struct drm_display_mode *mode;
beb60608
CW
4655
4656 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4657 intel_connector->panel.fixed_mode);
f8779fda 4658 if (mode) {
32f9d658
ZW
4659 drm_mode_probed_add(connector, mode);
4660 return 1;
4661 }
4662 }
beb60608 4663
32f9d658 4664 return 0;
a4fc5ed6
KP
4665}
4666
1aad7ac0
CW
4667static bool
4668intel_dp_detect_audio(struct drm_connector *connector)
4669{
1aad7ac0 4670 bool has_audio = false;
beb60608 4671 struct edid *edid;
1aad7ac0 4672
beb60608
CW
4673 edid = to_intel_connector(connector)->detect_edid;
4674 if (edid)
1aad7ac0 4675 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4676
1aad7ac0
CW
4677 return has_audio;
4678}
4679
f684960e
CW
4680static int
4681intel_dp_set_property(struct drm_connector *connector,
4682 struct drm_property *property,
4683 uint64_t val)
4684{
e953fd7b 4685 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4686 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4687 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4688 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4689 int ret;
4690
662595df 4691 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4692 if (ret)
4693 return ret;
4694
3f43c48d 4695 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4696 int i = val;
4697 bool has_audio;
4698
4699 if (i == intel_dp->force_audio)
f684960e
CW
4700 return 0;
4701
1aad7ac0 4702 intel_dp->force_audio = i;
f684960e 4703
c3e5f67b 4704 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4705 has_audio = intel_dp_detect_audio(connector);
4706 else
c3e5f67b 4707 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4708
4709 if (has_audio == intel_dp->has_audio)
f684960e
CW
4710 return 0;
4711
1aad7ac0 4712 intel_dp->has_audio = has_audio;
f684960e
CW
4713 goto done;
4714 }
4715
e953fd7b 4716 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4717 bool old_auto = intel_dp->color_range_auto;
4718 uint32_t old_range = intel_dp->color_range;
4719
55bc60db
VS
4720 switch (val) {
4721 case INTEL_BROADCAST_RGB_AUTO:
4722 intel_dp->color_range_auto = true;
4723 break;
4724 case INTEL_BROADCAST_RGB_FULL:
4725 intel_dp->color_range_auto = false;
4726 intel_dp->color_range = 0;
4727 break;
4728 case INTEL_BROADCAST_RGB_LIMITED:
4729 intel_dp->color_range_auto = false;
4730 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4731 break;
4732 default:
4733 return -EINVAL;
4734 }
ae4edb80
DV
4735
4736 if (old_auto == intel_dp->color_range_auto &&
4737 old_range == intel_dp->color_range)
4738 return 0;
4739
e953fd7b
CW
4740 goto done;
4741 }
4742
53b41837
YN
4743 if (is_edp(intel_dp) &&
4744 property == connector->dev->mode_config.scaling_mode_property) {
4745 if (val == DRM_MODE_SCALE_NONE) {
4746 DRM_DEBUG_KMS("no scaling not supported\n");
4747 return -EINVAL;
4748 }
4749
4750 if (intel_connector->panel.fitting_mode == val) {
4751 /* the eDP scaling property is not changed */
4752 return 0;
4753 }
4754 intel_connector->panel.fitting_mode = val;
4755
4756 goto done;
4757 }
4758
f684960e
CW
4759 return -EINVAL;
4760
4761done:
c0c36b94
CW
4762 if (intel_encoder->base.crtc)
4763 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4764
4765 return 0;
4766}
4767
a4fc5ed6 4768static void
73845adf 4769intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4770{
1d508706 4771 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4772
10e972d3 4773 kfree(intel_connector->detect_edid);
beb60608 4774
9cd300e0
JN
4775 if (!IS_ERR_OR_NULL(intel_connector->edid))
4776 kfree(intel_connector->edid);
4777
acd8db10
PZ
4778 /* Can't call is_edp() since the encoder may have been destroyed
4779 * already. */
4780 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4781 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4782
a4fc5ed6 4783 drm_connector_cleanup(connector);
55f78c43 4784 kfree(connector);
a4fc5ed6
KP
4785}
4786
00c09d70 4787void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4788{
da63a9f2
PZ
4789 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4790 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4791
4f71d0cb 4792 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4793 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4794 if (is_edp(intel_dp)) {
4795 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4796 /*
4797 * vdd might still be enabled do to the delayed vdd off.
4798 * Make sure vdd is actually turned off here.
4799 */
773538e8 4800 pps_lock(intel_dp);
4be73780 4801 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4802 pps_unlock(intel_dp);
4803
01527b31
CT
4804 if (intel_dp->edp_notifier.notifier_call) {
4805 unregister_reboot_notifier(&intel_dp->edp_notifier);
4806 intel_dp->edp_notifier.notifier_call = NULL;
4807 }
bd943159 4808 }
c8bd0e49 4809 drm_encoder_cleanup(encoder);
da63a9f2 4810 kfree(intel_dig_port);
24d05927
DV
4811}
4812
07f9cd0b
ID
4813static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4814{
4815 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4816
4817 if (!is_edp(intel_dp))
4818 return;
4819
951468f3
VS
4820 /*
4821 * vdd might still be enabled do to the delayed vdd off.
4822 * Make sure vdd is actually turned off here.
4823 */
afa4e53a 4824 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4825 pps_lock(intel_dp);
07f9cd0b 4826 edp_panel_vdd_off_sync(intel_dp);
773538e8 4827 pps_unlock(intel_dp);
07f9cd0b
ID
4828}
4829
49e6bc51
VS
4830static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4831{
4832 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4833 struct drm_device *dev = intel_dig_port->base.base.dev;
4834 struct drm_i915_private *dev_priv = dev->dev_private;
4835 enum intel_display_power_domain power_domain;
4836
4837 lockdep_assert_held(&dev_priv->pps_mutex);
4838
4839 if (!edp_have_panel_vdd(intel_dp))
4840 return;
4841
4842 /*
4843 * The VDD bit needs a power domain reference, so if the bit is
4844 * already enabled when we boot or resume, grab this reference and
4845 * schedule a vdd off, so we don't hold on to the reference
4846 * indefinitely.
4847 */
4848 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4849 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4850 intel_display_power_get(dev_priv, power_domain);
4851
4852 edp_panel_vdd_schedule_off(intel_dp);
4853}
4854
6d93c0c4
ID
4855static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4856{
49e6bc51
VS
4857 struct intel_dp *intel_dp;
4858
4859 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4860 return;
4861
4862 intel_dp = enc_to_intel_dp(encoder);
4863
4864 pps_lock(intel_dp);
4865
4866 /*
4867 * Read out the current power sequencer assignment,
4868 * in case the BIOS did something with it.
4869 */
4870 if (IS_VALLEYVIEW(encoder->dev))
4871 vlv_initial_power_sequencer_setup(intel_dp);
4872
4873 intel_edp_panel_vdd_sanitize(intel_dp);
4874
4875 pps_unlock(intel_dp);
6d93c0c4
ID
4876}
4877
a4fc5ed6 4878static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4879 .dpms = intel_connector_dpms,
a4fc5ed6 4880 .detect = intel_dp_detect,
beb60608 4881 .force = intel_dp_force,
a4fc5ed6 4882 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4883 .set_property = intel_dp_set_property,
2545e4a6 4884 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4885 .destroy = intel_dp_connector_destroy,
c6f95f27 4886 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4887 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4888};
4889
4890static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4891 .get_modes = intel_dp_get_modes,
4892 .mode_valid = intel_dp_mode_valid,
df0e9248 4893 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4894};
4895
a4fc5ed6 4896static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4897 .reset = intel_dp_encoder_reset,
24d05927 4898 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4899};
4900
0e32b39c 4901void
21d40d37 4902intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4903{
0e32b39c 4904 return;
c8110e52 4905}
6207937d 4906
b2c5c181 4907enum irqreturn
13cf5504
DA
4908intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4909{
4910 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4911 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4912 struct drm_device *dev = intel_dig_port->base.base.dev;
4913 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4914 enum intel_display_power_domain power_domain;
b2c5c181 4915 enum irqreturn ret = IRQ_NONE;
1c767b33 4916
0e32b39c
DA
4917 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4918 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4919
7a7f84cc
VS
4920 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4921 /*
4922 * vdd off can generate a long pulse on eDP which
4923 * would require vdd on to handle it, and thus we
4924 * would end up in an endless cycle of
4925 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4926 */
4927 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4928 port_name(intel_dig_port->port));
a8b3d52f 4929 return IRQ_HANDLED;
7a7f84cc
VS
4930 }
4931
26fbb774
VS
4932 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4933 port_name(intel_dig_port->port),
0e32b39c 4934 long_hpd ? "long" : "short");
13cf5504 4935
1c767b33
ID
4936 power_domain = intel_display_port_power_domain(intel_encoder);
4937 intel_display_power_get(dev_priv, power_domain);
4938
0e32b39c 4939 if (long_hpd) {
5fa836a9
MK
4940 /* indicate that we need to restart link training */
4941 intel_dp->train_set_valid = false;
2a592bec
DA
4942
4943 if (HAS_PCH_SPLIT(dev)) {
4944 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4945 goto mst_fail;
4946 } else {
4947 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4948 goto mst_fail;
4949 }
0e32b39c
DA
4950
4951 if (!intel_dp_get_dpcd(intel_dp)) {
4952 goto mst_fail;
4953 }
4954
4955 intel_dp_probe_oui(intel_dp);
4956
4957 if (!intel_dp_probe_mst(intel_dp))
4958 goto mst_fail;
4959
4960 } else {
4961 if (intel_dp->is_mst) {
1c767b33 4962 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4963 goto mst_fail;
4964 }
4965
4966 if (!intel_dp->is_mst) {
4967 /*
4968 * we'll check the link status via the normal hot plug path later -
4969 * but for short hpds we should check it now
4970 */
5b215bcf 4971 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4972 intel_dp_check_link_status(intel_dp);
5b215bcf 4973 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4974 }
4975 }
b2c5c181
DV
4976
4977 ret = IRQ_HANDLED;
4978
1c767b33 4979 goto put_power;
0e32b39c
DA
4980mst_fail:
4981 /* if we were in MST mode, and device is not there get out of MST mode */
4982 if (intel_dp->is_mst) {
4983 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4984 intel_dp->is_mst = false;
4985 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4986 }
1c767b33
ID
4987put_power:
4988 intel_display_power_put(dev_priv, power_domain);
4989
4990 return ret;
13cf5504
DA
4991}
4992
e3421a18
ZW
4993/* Return which DP Port should be selected for Transcoder DP control */
4994int
0206e353 4995intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4996{
4997 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4998 struct intel_encoder *intel_encoder;
4999 struct intel_dp *intel_dp;
e3421a18 5000
fa90ecef
PZ
5001 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5002 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5003
fa90ecef
PZ
5004 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5005 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5006 return intel_dp->output_reg;
e3421a18 5007 }
ea5b213a 5008
e3421a18
ZW
5009 return -1;
5010}
5011
36e83a18 5012/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5013bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5014{
5015 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5016 union child_device_config *p_child;
36e83a18 5017 int i;
5d8a7752
VS
5018 static const short port_mapping[] = {
5019 [PORT_B] = PORT_IDPB,
5020 [PORT_C] = PORT_IDPC,
5021 [PORT_D] = PORT_IDPD,
5022 };
36e83a18 5023
3b32a35b
VS
5024 if (port == PORT_A)
5025 return true;
5026
41aa3448 5027 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5028 return false;
5029
41aa3448
RV
5030 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5031 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5032
5d8a7752 5033 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5034 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5035 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5036 return true;
5037 }
5038 return false;
5039}
5040
0e32b39c 5041void
f684960e
CW
5042intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5043{
53b41837
YN
5044 struct intel_connector *intel_connector = to_intel_connector(connector);
5045
3f43c48d 5046 intel_attach_force_audio_property(connector);
e953fd7b 5047 intel_attach_broadcast_rgb_property(connector);
55bc60db 5048 intel_dp->color_range_auto = true;
53b41837
YN
5049
5050 if (is_edp(intel_dp)) {
5051 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5052 drm_object_attach_property(
5053 &connector->base,
53b41837 5054 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5055 DRM_MODE_SCALE_ASPECT);
5056 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5057 }
f684960e
CW
5058}
5059
dada1a9f
ID
5060static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5061{
5062 intel_dp->last_power_cycle = jiffies;
5063 intel_dp->last_power_on = jiffies;
5064 intel_dp->last_backlight_off = jiffies;
5065}
5066
67a54566
DV
5067static void
5068intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5069 struct intel_dp *intel_dp)
67a54566
DV
5070{
5071 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5072 struct edp_power_seq cur, vbt, spec,
5073 *final = &intel_dp->pps_delays;
67a54566 5074 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 5075 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5076
e39b999a
VS
5077 lockdep_assert_held(&dev_priv->pps_mutex);
5078
81ddbc69
VS
5079 /* already initialized? */
5080 if (final->t11_t12 != 0)
5081 return;
5082
453c5420 5083 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5084 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5085 pp_on_reg = PCH_PP_ON_DELAYS;
5086 pp_off_reg = PCH_PP_OFF_DELAYS;
5087 pp_div_reg = PCH_PP_DIVISOR;
5088 } else {
bf13e81b
JN
5089 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5090
5091 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5092 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5093 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5094 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5095 }
67a54566
DV
5096
5097 /* Workaround: Need to write PP_CONTROL with the unlock key as
5098 * the very first thing. */
453c5420 5099 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 5100 I915_WRITE(pp_ctrl_reg, pp);
67a54566 5101
453c5420
JB
5102 pp_on = I915_READ(pp_on_reg);
5103 pp_off = I915_READ(pp_off_reg);
5104 pp_div = I915_READ(pp_div_reg);
67a54566
DV
5105
5106 /* Pull timing values out of registers */
5107 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5108 PANEL_POWER_UP_DELAY_SHIFT;
5109
5110 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5111 PANEL_LIGHT_ON_DELAY_SHIFT;
5112
5113 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5114 PANEL_LIGHT_OFF_DELAY_SHIFT;
5115
5116 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5117 PANEL_POWER_DOWN_DELAY_SHIFT;
5118
5119 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5120 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5121
5122 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5123 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5124
41aa3448 5125 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5126
5127 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5128 * our hw here, which are all in 100usec. */
5129 spec.t1_t3 = 210 * 10;
5130 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5131 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5132 spec.t10 = 500 * 10;
5133 /* This one is special and actually in units of 100ms, but zero
5134 * based in the hw (so we need to add 100 ms). But the sw vbt
5135 * table multiplies it with 1000 to make it in units of 100usec,
5136 * too. */
5137 spec.t11_t12 = (510 + 100) * 10;
5138
5139 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5140 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5141
5142 /* Use the max of the register settings and vbt. If both are
5143 * unset, fall back to the spec limits. */
36b5f425 5144#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5145 spec.field : \
5146 max(cur.field, vbt.field))
5147 assign_final(t1_t3);
5148 assign_final(t8);
5149 assign_final(t9);
5150 assign_final(t10);
5151 assign_final(t11_t12);
5152#undef assign_final
5153
36b5f425 5154#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5155 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5156 intel_dp->backlight_on_delay = get_delay(t8);
5157 intel_dp->backlight_off_delay = get_delay(t9);
5158 intel_dp->panel_power_down_delay = get_delay(t10);
5159 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5160#undef get_delay
5161
f30d26e4
JN
5162 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5163 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5164 intel_dp->panel_power_cycle_delay);
5165
5166 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5167 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5168}
5169
5170static void
5171intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5172 struct intel_dp *intel_dp)
f30d26e4
JN
5173{
5174 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5175 u32 pp_on, pp_off, pp_div, port_sel = 0;
5176 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5177 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 5178 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5179 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5180
e39b999a 5181 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
5182
5183 if (HAS_PCH_SPLIT(dev)) {
5184 pp_on_reg = PCH_PP_ON_DELAYS;
5185 pp_off_reg = PCH_PP_OFF_DELAYS;
5186 pp_div_reg = PCH_PP_DIVISOR;
5187 } else {
bf13e81b
JN
5188 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5189
5190 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5191 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5192 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5193 }
5194
b2f19d1a
PZ
5195 /*
5196 * And finally store the new values in the power sequencer. The
5197 * backlight delays are set to 1 because we do manual waits on them. For
5198 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5199 * we'll end up waiting for the backlight off delay twice: once when we
5200 * do the manual sleep, and once when we disable the panel and wait for
5201 * the PP_STATUS bit to become zero.
5202 */
f30d26e4 5203 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5204 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5205 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5206 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5207 /* Compute the divisor for the pp clock, simply match the Bspec
5208 * formula. */
453c5420 5209 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 5210 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
5211 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5212
5213 /* Haswell doesn't have any port selection bits for the panel
5214 * power sequencer any more. */
bc7d38a4 5215 if (IS_VALLEYVIEW(dev)) {
ad933b56 5216 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5217 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5218 if (port == PORT_A)
a24c144c 5219 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5220 else
a24c144c 5221 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5222 }
5223
453c5420
JB
5224 pp_on |= port_sel;
5225
5226 I915_WRITE(pp_on_reg, pp_on);
5227 I915_WRITE(pp_off_reg, pp_off);
5228 I915_WRITE(pp_div_reg, pp_div);
67a54566 5229
67a54566 5230 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5231 I915_READ(pp_on_reg),
5232 I915_READ(pp_off_reg),
5233 I915_READ(pp_div_reg));
f684960e
CW
5234}
5235
b33a2815
VK
5236/**
5237 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5238 * @dev: DRM device
5239 * @refresh_rate: RR to be programmed
5240 *
5241 * This function gets called when refresh rate (RR) has to be changed from
5242 * one frequency to another. Switches can be between high and low RR
5243 * supported by the panel or to any other RR based on media playback (in
5244 * this case, RR value needs to be passed from user space).
5245 *
5246 * The caller of this function needs to take a lock on dev_priv->drrs.
5247 */
96178eeb 5248static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5249{
5250 struct drm_i915_private *dev_priv = dev->dev_private;
5251 struct intel_encoder *encoder;
96178eeb
VK
5252 struct intel_digital_port *dig_port = NULL;
5253 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5254 struct intel_crtc_state *config = NULL;
439d7ac0 5255 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5256 u32 reg, val;
96178eeb 5257 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5258
5259 if (refresh_rate <= 0) {
5260 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5261 return;
5262 }
5263
96178eeb
VK
5264 if (intel_dp == NULL) {
5265 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5266 return;
5267 }
5268
1fcc9d1c 5269 /*
e4d59f6b
RV
5270 * FIXME: This needs proper synchronization with psr state for some
5271 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5272 */
439d7ac0 5273
96178eeb
VK
5274 dig_port = dp_to_dig_port(intel_dp);
5275 encoder = &dig_port->base;
723f9aab 5276 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5277
5278 if (!intel_crtc) {
5279 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5280 return;
5281 }
5282
6e3c9717 5283 config = intel_crtc->config;
439d7ac0 5284
96178eeb 5285 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5286 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5287 return;
5288 }
5289
96178eeb
VK
5290 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5291 refresh_rate)
439d7ac0
PB
5292 index = DRRS_LOW_RR;
5293
96178eeb 5294 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5295 DRM_DEBUG_KMS(
5296 "DRRS requested for previously set RR...ignoring\n");
5297 return;
5298 }
5299
5300 if (!intel_crtc->active) {
5301 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5302 return;
5303 }
5304
44395bfe 5305 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5306 switch (index) {
5307 case DRRS_HIGH_RR:
5308 intel_dp_set_m_n(intel_crtc, M1_N1);
5309 break;
5310 case DRRS_LOW_RR:
5311 intel_dp_set_m_n(intel_crtc, M2_N2);
5312 break;
5313 case DRRS_MAX_RR:
5314 default:
5315 DRM_ERROR("Unsupported refreshrate type\n");
5316 }
5317 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5318 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5319 val = I915_READ(reg);
a4c30b1d 5320
439d7ac0 5321 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5322 if (IS_VALLEYVIEW(dev))
5323 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5324 else
5325 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5326 } else {
6fa7aec1
VK
5327 if (IS_VALLEYVIEW(dev))
5328 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5329 else
5330 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5331 }
5332 I915_WRITE(reg, val);
5333 }
5334
4e9ac947
VK
5335 dev_priv->drrs.refresh_rate_type = index;
5336
5337 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5338}
5339
b33a2815
VK
5340/**
5341 * intel_edp_drrs_enable - init drrs struct if supported
5342 * @intel_dp: DP struct
5343 *
5344 * Initializes frontbuffer_bits and drrs.dp
5345 */
c395578e
VK
5346void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5347{
5348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5349 struct drm_i915_private *dev_priv = dev->dev_private;
5350 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5351 struct drm_crtc *crtc = dig_port->base.base.crtc;
5352 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5353
5354 if (!intel_crtc->config->has_drrs) {
5355 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5356 return;
5357 }
5358
5359 mutex_lock(&dev_priv->drrs.mutex);
5360 if (WARN_ON(dev_priv->drrs.dp)) {
5361 DRM_ERROR("DRRS already enabled\n");
5362 goto unlock;
5363 }
5364
5365 dev_priv->drrs.busy_frontbuffer_bits = 0;
5366
5367 dev_priv->drrs.dp = intel_dp;
5368
5369unlock:
5370 mutex_unlock(&dev_priv->drrs.mutex);
5371}
5372
b33a2815
VK
5373/**
5374 * intel_edp_drrs_disable - Disable DRRS
5375 * @intel_dp: DP struct
5376 *
5377 */
c395578e
VK
5378void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5379{
5380 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5381 struct drm_i915_private *dev_priv = dev->dev_private;
5382 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5383 struct drm_crtc *crtc = dig_port->base.base.crtc;
5384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5385
5386 if (!intel_crtc->config->has_drrs)
5387 return;
5388
5389 mutex_lock(&dev_priv->drrs.mutex);
5390 if (!dev_priv->drrs.dp) {
5391 mutex_unlock(&dev_priv->drrs.mutex);
5392 return;
5393 }
5394
5395 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5396 intel_dp_set_drrs_state(dev_priv->dev,
5397 intel_dp->attached_connector->panel.
5398 fixed_mode->vrefresh);
5399
5400 dev_priv->drrs.dp = NULL;
5401 mutex_unlock(&dev_priv->drrs.mutex);
5402
5403 cancel_delayed_work_sync(&dev_priv->drrs.work);
5404}
5405
4e9ac947
VK
5406static void intel_edp_drrs_downclock_work(struct work_struct *work)
5407{
5408 struct drm_i915_private *dev_priv =
5409 container_of(work, typeof(*dev_priv), drrs.work.work);
5410 struct intel_dp *intel_dp;
5411
5412 mutex_lock(&dev_priv->drrs.mutex);
5413
5414 intel_dp = dev_priv->drrs.dp;
5415
5416 if (!intel_dp)
5417 goto unlock;
5418
439d7ac0 5419 /*
4e9ac947
VK
5420 * The delayed work can race with an invalidate hence we need to
5421 * recheck.
439d7ac0
PB
5422 */
5423
4e9ac947
VK
5424 if (dev_priv->drrs.busy_frontbuffer_bits)
5425 goto unlock;
439d7ac0 5426
4e9ac947
VK
5427 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5428 intel_dp_set_drrs_state(dev_priv->dev,
5429 intel_dp->attached_connector->panel.
5430 downclock_mode->vrefresh);
439d7ac0 5431
4e9ac947 5432unlock:
4e9ac947 5433 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5434}
5435
b33a2815
VK
5436/**
5437 * intel_edp_drrs_invalidate - Invalidate DRRS
5438 * @dev: DRM device
5439 * @frontbuffer_bits: frontbuffer plane tracking bits
5440 *
5441 * When there is a disturbance on screen (due to cursor movement/time
5442 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5443 * high RR.
5444 *
5445 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5446 */
a93fad0f
VK
5447void intel_edp_drrs_invalidate(struct drm_device *dev,
5448 unsigned frontbuffer_bits)
5449{
5450 struct drm_i915_private *dev_priv = dev->dev_private;
5451 struct drm_crtc *crtc;
5452 enum pipe pipe;
5453
9da7d693 5454 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5455 return;
5456
88f933a8 5457 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5458
a93fad0f 5459 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5460 if (!dev_priv->drrs.dp) {
5461 mutex_unlock(&dev_priv->drrs.mutex);
5462 return;
5463 }
5464
a93fad0f
VK
5465 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5466 pipe = to_intel_crtc(crtc)->pipe;
5467
5468 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5469 intel_dp_set_drrs_state(dev_priv->dev,
5470 dev_priv->drrs.dp->attached_connector->panel.
5471 fixed_mode->vrefresh);
5472 }
5473
5474 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5475
5476 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5477 mutex_unlock(&dev_priv->drrs.mutex);
5478}
5479
b33a2815
VK
5480/**
5481 * intel_edp_drrs_flush - Flush DRRS
5482 * @dev: DRM device
5483 * @frontbuffer_bits: frontbuffer plane tracking bits
5484 *
5485 * When there is no movement on screen, DRRS work can be scheduled.
5486 * This DRRS work is responsible for setting relevant registers after a
5487 * timeout of 1 second.
5488 *
5489 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5490 */
a93fad0f
VK
5491void intel_edp_drrs_flush(struct drm_device *dev,
5492 unsigned frontbuffer_bits)
5493{
5494 struct drm_i915_private *dev_priv = dev->dev_private;
5495 struct drm_crtc *crtc;
5496 enum pipe pipe;
5497
9da7d693 5498 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5499 return;
5500
88f933a8 5501 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5502
a93fad0f 5503 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5504 if (!dev_priv->drrs.dp) {
5505 mutex_unlock(&dev_priv->drrs.mutex);
5506 return;
5507 }
5508
a93fad0f
VK
5509 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5510 pipe = to_intel_crtc(crtc)->pipe;
5511 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5512
a93fad0f
VK
5513 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5514 !dev_priv->drrs.busy_frontbuffer_bits)
5515 schedule_delayed_work(&dev_priv->drrs.work,
5516 msecs_to_jiffies(1000));
5517 mutex_unlock(&dev_priv->drrs.mutex);
5518}
5519
b33a2815
VK
5520/**
5521 * DOC: Display Refresh Rate Switching (DRRS)
5522 *
5523 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5524 * which enables swtching between low and high refresh rates,
5525 * dynamically, based on the usage scenario. This feature is applicable
5526 * for internal panels.
5527 *
5528 * Indication that the panel supports DRRS is given by the panel EDID, which
5529 * would list multiple refresh rates for one resolution.
5530 *
5531 * DRRS is of 2 types - static and seamless.
5532 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5533 * (may appear as a blink on screen) and is used in dock-undock scenario.
5534 * Seamless DRRS involves changing RR without any visual effect to the user
5535 * and can be used during normal system usage. This is done by programming
5536 * certain registers.
5537 *
5538 * Support for static/seamless DRRS may be indicated in the VBT based on
5539 * inputs from the panel spec.
5540 *
5541 * DRRS saves power by switching to low RR based on usage scenarios.
5542 *
5543 * eDP DRRS:-
5544 * The implementation is based on frontbuffer tracking implementation.
5545 * When there is a disturbance on the screen triggered by user activity or a
5546 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5547 * When there is no movement on screen, after a timeout of 1 second, a switch
5548 * to low RR is made.
5549 * For integration with frontbuffer tracking code,
5550 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5551 *
5552 * DRRS can be further extended to support other internal panels and also
5553 * the scenario of video playback wherein RR is set based on the rate
5554 * requested by userspace.
5555 */
5556
5557/**
5558 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5559 * @intel_connector: eDP connector
5560 * @fixed_mode: preferred mode of panel
5561 *
5562 * This function is called only once at driver load to initialize basic
5563 * DRRS stuff.
5564 *
5565 * Returns:
5566 * Downclock mode if panel supports it, else return NULL.
5567 * DRRS support is determined by the presence of downclock mode (apart
5568 * from VBT setting).
5569 */
4f9db5b5 5570static struct drm_display_mode *
96178eeb
VK
5571intel_dp_drrs_init(struct intel_connector *intel_connector,
5572 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5573{
5574 struct drm_connector *connector = &intel_connector->base;
96178eeb 5575 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5576 struct drm_i915_private *dev_priv = dev->dev_private;
5577 struct drm_display_mode *downclock_mode = NULL;
5578
9da7d693
DV
5579 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5580 mutex_init(&dev_priv->drrs.mutex);
5581
4f9db5b5
PB
5582 if (INTEL_INFO(dev)->gen <= 6) {
5583 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5584 return NULL;
5585 }
5586
5587 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5588 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5589 return NULL;
5590 }
5591
5592 downclock_mode = intel_find_panel_downclock
5593 (dev, fixed_mode, connector);
5594
5595 if (!downclock_mode) {
a1d26342 5596 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5597 return NULL;
5598 }
5599
96178eeb 5600 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5601
96178eeb 5602 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5603 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5604 return downclock_mode;
5605}
5606
ed92f0b2 5607static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5608 struct intel_connector *intel_connector)
ed92f0b2
PZ
5609{
5610 struct drm_connector *connector = &intel_connector->base;
5611 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5612 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5613 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5614 struct drm_i915_private *dev_priv = dev->dev_private;
5615 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5616 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5617 bool has_dpcd;
5618 struct drm_display_mode *scan;
5619 struct edid *edid;
6517d273 5620 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5621
5622 if (!is_edp(intel_dp))
5623 return true;
5624
49e6bc51
VS
5625 pps_lock(intel_dp);
5626 intel_edp_panel_vdd_sanitize(intel_dp);
5627 pps_unlock(intel_dp);
63635217 5628
ed92f0b2 5629 /* Cache DPCD and EDID for edp. */
ed92f0b2 5630 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5631
5632 if (has_dpcd) {
5633 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5634 dev_priv->no_aux_handshake =
5635 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5636 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5637 } else {
5638 /* if this fails, presume the device is a ghost */
5639 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5640 return false;
5641 }
5642
5643 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5644 pps_lock(intel_dp);
36b5f425 5645 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5646 pps_unlock(intel_dp);
ed92f0b2 5647
060c8778 5648 mutex_lock(&dev->mode_config.mutex);
0b99836f 5649 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5650 if (edid) {
5651 if (drm_add_edid_modes(connector, edid)) {
5652 drm_mode_connector_update_edid_property(connector,
5653 edid);
5654 drm_edid_to_eld(connector, edid);
5655 } else {
5656 kfree(edid);
5657 edid = ERR_PTR(-EINVAL);
5658 }
5659 } else {
5660 edid = ERR_PTR(-ENOENT);
5661 }
5662 intel_connector->edid = edid;
5663
5664 /* prefer fixed mode from EDID if available */
5665 list_for_each_entry(scan, &connector->probed_modes, head) {
5666 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5667 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5668 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5669 intel_connector, fixed_mode);
ed92f0b2
PZ
5670 break;
5671 }
5672 }
5673
5674 /* fallback to VBT if available for eDP */
5675 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5676 fixed_mode = drm_mode_duplicate(dev,
5677 dev_priv->vbt.lfp_lvds_vbt_mode);
5678 if (fixed_mode)
5679 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5680 }
060c8778 5681 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5682
01527b31
CT
5683 if (IS_VALLEYVIEW(dev)) {
5684 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5685 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5686
5687 /*
5688 * Figure out the current pipe for the initial backlight setup.
5689 * If the current pipe isn't valid, try the PPS pipe, and if that
5690 * fails just assume pipe A.
5691 */
5692 if (IS_CHERRYVIEW(dev))
5693 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5694 else
5695 pipe = PORT_TO_PIPE(intel_dp->DP);
5696
5697 if (pipe != PIPE_A && pipe != PIPE_B)
5698 pipe = intel_dp->pps_pipe;
5699
5700 if (pipe != PIPE_A && pipe != PIPE_B)
5701 pipe = PIPE_A;
5702
5703 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5704 pipe_name(pipe));
01527b31
CT
5705 }
5706
4f9db5b5 5707 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5708 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5709 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5710
5711 return true;
5712}
5713
16c25533 5714bool
f0fec3f2
PZ
5715intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5716 struct intel_connector *intel_connector)
a4fc5ed6 5717{
f0fec3f2
PZ
5718 struct drm_connector *connector = &intel_connector->base;
5719 struct intel_dp *intel_dp = &intel_dig_port->dp;
5720 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5721 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5722 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5723 enum port port = intel_dig_port->port;
0b99836f 5724 int type;
a4fc5ed6 5725
a4a5d2f8
VS
5726 intel_dp->pps_pipe = INVALID_PIPE;
5727
ec5b01dd 5728 /* intel_dp vfuncs */
b6b5e383
DL
5729 if (INTEL_INFO(dev)->gen >= 9)
5730 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5731 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5732 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5733 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5734 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5735 else if (HAS_PCH_SPLIT(dev))
5736 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5737 else
5738 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5739
b9ca5fad
DL
5740 if (INTEL_INFO(dev)->gen >= 9)
5741 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5742 else
5743 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5744
0767935e
DV
5745 /* Preserve the current hw state. */
5746 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5747 intel_dp->attached_connector = intel_connector;
3d3dc149 5748
3b32a35b 5749 if (intel_dp_is_edp(dev, port))
b329530c 5750 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5751 else
5752 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5753
f7d24902
ID
5754 /*
5755 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5756 * for DP the encoder type can be set by the caller to
5757 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5758 */
5759 if (type == DRM_MODE_CONNECTOR_eDP)
5760 intel_encoder->type = INTEL_OUTPUT_EDP;
5761
c17ed5b5
VS
5762 /* eDP only on port B and/or C on vlv/chv */
5763 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5764 port != PORT_B && port != PORT_C))
5765 return false;
5766
e7281eab
ID
5767 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5768 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5769 port_name(port));
5770
b329530c 5771 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5772 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5773
a4fc5ed6
KP
5774 connector->interlace_allowed = true;
5775 connector->doublescan_allowed = 0;
5776
f0fec3f2 5777 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5778 edp_panel_vdd_work);
a4fc5ed6 5779
df0e9248 5780 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5781 drm_connector_register(connector);
a4fc5ed6 5782
affa9354 5783 if (HAS_DDI(dev))
bcbc889b
PZ
5784 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5785 else
5786 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5787 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5788
0b99836f 5789 /* Set up the hotplug pin. */
ab9d7c30
PZ
5790 switch (port) {
5791 case PORT_A:
1d843f9d 5792 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5793 break;
5794 case PORT_B:
1d843f9d 5795 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5796 break;
5797 case PORT_C:
1d843f9d 5798 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5799 break;
5800 case PORT_D:
1d843f9d 5801 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5802 break;
5803 default:
ad1c0b19 5804 BUG();
5eb08b69
ZW
5805 }
5806
dada1a9f 5807 if (is_edp(intel_dp)) {
773538e8 5808 pps_lock(intel_dp);
1e74a324
VS
5809 intel_dp_init_panel_power_timestamps(intel_dp);
5810 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5811 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5812 else
36b5f425 5813 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5814 pps_unlock(intel_dp);
dada1a9f 5815 }
0095e6dc 5816
9d1a1031 5817 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5818
0e32b39c 5819 /* init MST on ports that can support it */
c86ea3d0 5820 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5821 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5822 intel_dp_mst_encoder_init(intel_dig_port,
5823 intel_connector->base.base.id);
0e32b39c
DA
5824 }
5825 }
5826
36b5f425 5827 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5828 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5829 if (is_edp(intel_dp)) {
5830 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5831 /*
5832 * vdd might still be enabled do to the delayed vdd off.
5833 * Make sure vdd is actually turned off here.
5834 */
773538e8 5835 pps_lock(intel_dp);
4be73780 5836 edp_panel_vdd_off_sync(intel_dp);
773538e8 5837 pps_unlock(intel_dp);
15b1d171 5838 }
34ea3d38 5839 drm_connector_unregister(connector);
b2f246a8 5840 drm_connector_cleanup(connector);
16c25533 5841 return false;
b2f246a8 5842 }
32f9d658 5843
f684960e
CW
5844 intel_dp_add_properties(intel_dp, connector);
5845
a4fc5ed6
KP
5846 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5847 * 0xd. Failure to do so will result in spurious interrupts being
5848 * generated on the port when a cable is not attached.
5849 */
5850 if (IS_G4X(dev) && !IS_GM45(dev)) {
5851 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5852 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5853 }
16c25533 5854
aa7471d2
JN
5855 i915_debugfs_connector_add(connector);
5856
16c25533 5857 return true;
a4fc5ed6 5858}
f0fec3f2
PZ
5859
5860void
5861intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5862{
13cf5504 5863 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5864 struct intel_digital_port *intel_dig_port;
5865 struct intel_encoder *intel_encoder;
5866 struct drm_encoder *encoder;
5867 struct intel_connector *intel_connector;
5868
b14c5679 5869 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5870 if (!intel_dig_port)
5871 return;
5872
08d9bc92 5873 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5874 if (!intel_connector) {
5875 kfree(intel_dig_port);
5876 return;
5877 }
5878
5879 intel_encoder = &intel_dig_port->base;
5880 encoder = &intel_encoder->base;
5881
5882 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5883 DRM_MODE_ENCODER_TMDS);
5884
5bfe2ac0 5885 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5886 intel_encoder->disable = intel_disable_dp;
00c09d70 5887 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5888 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5889 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5890 if (IS_CHERRYVIEW(dev)) {
9197c88b 5891 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5892 intel_encoder->pre_enable = chv_pre_enable_dp;
5893 intel_encoder->enable = vlv_enable_dp;
580d3811 5894 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5895 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5896 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5897 intel_encoder->pre_enable = vlv_pre_enable_dp;
5898 intel_encoder->enable = vlv_enable_dp;
49277c31 5899 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5900 } else {
ecff4f3b
JN
5901 intel_encoder->pre_enable = g4x_pre_enable_dp;
5902 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5903 if (INTEL_INFO(dev)->gen >= 5)
5904 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5905 }
f0fec3f2 5906
174edf1f 5907 intel_dig_port->port = port;
f0fec3f2
PZ
5908 intel_dig_port->dp.output_reg = output_reg;
5909
00c09d70 5910 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5911 if (IS_CHERRYVIEW(dev)) {
5912 if (port == PORT_D)
5913 intel_encoder->crtc_mask = 1 << 2;
5914 else
5915 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5916 } else {
5917 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5918 }
bc079e8b 5919 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5920 intel_encoder->hot_plug = intel_dp_hot_plug;
5921
13cf5504
DA
5922 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5923 dev_priv->hpd_irq_port[port] = intel_dig_port;
5924
15b1d171
PZ
5925 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5926 drm_encoder_cleanup(encoder);
5927 kfree(intel_dig_port);
b2f246a8 5928 kfree(intel_connector);
15b1d171 5929 }
f0fec3f2 5930}
0e32b39c
DA
5931
5932void intel_dp_mst_suspend(struct drm_device *dev)
5933{
5934 struct drm_i915_private *dev_priv = dev->dev_private;
5935 int i;
5936
5937 /* disable MST */
5938 for (i = 0; i < I915_MAX_PORTS; i++) {
5939 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5940 if (!intel_dig_port)
5941 continue;
5942
5943 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5944 if (!intel_dig_port->dp.can_mst)
5945 continue;
5946 if (intel_dig_port->dp.is_mst)
5947 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5948 }
5949 }
5950}
5951
5952void intel_dp_mst_resume(struct drm_device *dev)
5953{
5954 struct drm_i915_private *dev_priv = dev->dev_private;
5955 int i;
5956
5957 for (i = 0; i < I915_MAX_PORTS; i++) {
5958 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5959 if (!intel_dig_port)
5960 continue;
5961 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5962 int ret;
5963
5964 if (!intel_dig_port->dp.can_mst)
5965 continue;
5966
5967 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5968 if (ret != 0) {
5969 intel_dp_check_mst_status(&intel_dig_port->dp);
5970 }
5971 }
5972 }
5973}