drm/i915: Use DP_LINK_RATE_SET whenever possible
drivers/gpu/drm/i915/intel_dp.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides additional link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2;
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
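
/*
 * Worked example of the fixed-point m2 encoding used above: for the
 * 1.62 GHz entry, (32 << 22) | 1677722 == 0x0819999a, which is exactly
 * the .m2 value programmed; the 2.7/5.4 GHz entries use
 * (27 << 22) | 0 == 0x06c00000.
 */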
/* Skylake supports the following link rates (in kHz) */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
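
/*
 * The DPCD bandwidth codes handled above encode the per-lane rate in units
 * of 0.27 Gbps: 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps.
 */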

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
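
/*
 * Same units, worked through for a wide link: a 4-lane 5.4 GHz (HBR2)
 * configuration gives 540000 * 4 * 8 / 10 == 1728000 decakilobits, i.e.
 * 17.28 Gbit/s of payload bandwidth after 8b/10b coding overhead.
 */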

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
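
/*
 * The AUX data registers expect the first payload byte in the most
 * significant byte of each 32-bit word, so for example
 * intel_dp_pack_aux((uint8_t []){ 0x12, 0x34 }, 2) == 0x12340000, and
 * intel_dp_unpack_aux(0x12340000, dst, 2) recovers { 0x12, 0x34 }.
 */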

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
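
/*
 * Callers bracket panel power sequencer register access with this pair,
 * e.g. (as edp_notify_handler() and intel_dp_aux_ch() do below):
 *
 *	pps_lock(intel_dp);
 *	... read/modify PP_CONTROL / PP_STATUS ...
 *	pps_unlock(intel_dp);
 *
 * which takes the power domain reference first and only then pps_mutex,
 * matching the ordering described in vlv_power_sequencer_reset().
 */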

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer for this port yet.
	 * Pick one that's not used by another port.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/*
 * Reboot notifier handler to shut down panel power, to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on hrawclk, and we want the AUX clock
	 * to run at 2MHz. So take the hrawclk value, divide by 2 and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
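
/*
 * Example of the divider math above: on VLV hrawclk is fixed at 200 MHz,
 * so dividing by 2 yields 100, which is exactly the constant
 * vlv_get_aux_clock_divider() returns directly.
 */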

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
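
/*
 * The 20-byte cap enforced above corresponds to the 5 32-bit AUX data
 * registers: a single AUX transaction carries at most 16 payload bytes
 * plus the 4-byte header built by intel_dp_aux_transfer() below.
 */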

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = msg->request << 4;
	txbuf[1] = msg->address >> 8;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 1;

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			/* Return payload size. */
			ret = msg->size;
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
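
/*
 * Example: a 1-byte native DPCD read (msg->size == 1) sends only the 4-byte
 * header (request/address/length) and expects up to 2 bytes back - the AUX
 * reply code in rxbuf[0] plus the single data byte copied to msg->buffer.
 */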

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int ret;

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in a CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
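
/*
 * The switch above is on link_clock / 2 because the DPLL_CRTL1_LINK_RATE_*
 * values are named after the PLL frequency, which is half the DP link rate:
 * e.g. a 540000 kHz (HBR2) link selects the 2700 entry and a 162000 kHz
 * (RBR) link selects the 810 entry.
 */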

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_supported_rates) {
		*sink_rates = intel_dp->supported_rates;
		return intel_dp->num_supported_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
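
/*
 * The "(bw_code >> 3) + 1" expressions above turn a DPCD bandwidth code
 * into a count of entries in default_rates: DP_LINK_BW_1_62 (0x06) -> 1,
 * DP_LINK_BW_2_7 (0x0a) -> 2, DP_LINK_BW_5_4 (0x14) -> 3.
 */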

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *supported_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			supported_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_supported_rates(struct intel_dp *intel_dp,
				 int *supported_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       supported_rates);
}
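
/*
 * intersect_rates() relies on both rate tables being sorted in ascending
 * order, making it a linear-time merge. For example, intersecting
 * default_rates with gen9_rates yields { 162000, 270000, 540000 } and
 * returns 3.
 */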

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_supported_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
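
/*
 * rates[] is zero-initialized and filled in ascending order, so
 * rate_to_index(0, rates) is the number of valid entries, and
 * rates[rate_to_index(0, rates) - 1] is the highest common link rate.
 */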

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift... */
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
	int supported_len;

	supported_len = intel_supported_rates(intel_dp, supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_supported_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock],
				      intel_dp->supported_rates);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(supported_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}
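
/*
 * Note the two programming paths selected above, which is what the
 * "Use DP_LINK_RATE_SET whenever possible" change is about: when the sink
 * reports a table of supported link rates (eDP 1.4), link_bw is left at 0
 * and rate_select holds the table index to be written via DP_LINK_RATE_SET;
 * otherwise the classic DP_LINK_BW_SET bandwidth code is used and
 * rate_select stays 0.
 */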

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1671
951468f3
VS
1672/*
1673 * Must be paired with intel_edp_panel_vdd_off() or
1674 * intel_edp_panel_off().
1675 * Nested calls to these functions are not allowed since
1676 * we drop the lock. Caller must use some higher level
1677 * locking to prevent nested calls from other threads.
1678 */
b80d6c78 1679void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1680{
c695b6b6 1681 bool vdd;
adddaaf4 1682
c695b6b6
VS
1683 if (!is_edp(intel_dp))
1684 return;
1685
773538e8 1686 pps_lock(intel_dp);
c695b6b6 1687 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1688 pps_unlock(intel_dp);
c695b6b6 1689
e2c719b7 1690 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1691 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1692}
1693
4be73780 1694static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1695{
30add22d 1696 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1697 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1698 struct intel_digital_port *intel_dig_port =
1699 dp_to_dig_port(intel_dp);
1700 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1701 enum intel_display_power_domain power_domain;
5d613501 1702 u32 pp;
453c5420 1703 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1704
e39b999a 1705 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1706
15e899a0 1707 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1708
15e899a0 1709 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1710 return;
b0665d57 1711
3936fcf4
VS
1712 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1713 port_name(intel_dig_port->port));
bd943159 1714
be2c9196
VS
1715 pp = ironlake_get_pp_control(intel_dp);
1716 pp &= ~EDP_FORCE_VDD;
453c5420 1717
be2c9196
VS
1718 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1719 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1720
be2c9196
VS
1721 I915_WRITE(pp_ctrl_reg, pp);
1722 POSTING_READ(pp_ctrl_reg);
90791a5c 1723
be2c9196
VS
1724 /* Make sure sequencer is idle before allowing subsequent activity */
1725 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1726 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1727
be2c9196
VS
1728 if ((pp & POWER_TARGET_ON) == 0)
1729 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1730
be2c9196
VS
1731 power_domain = intel_display_port_power_domain(intel_encoder);
1732 intel_display_power_put(dev_priv, power_domain);
bd943159 1733}
5d613501 1734
4be73780 1735static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1736{
1737 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1738 struct intel_dp, panel_vdd_work);
bd943159 1739
773538e8 1740 pps_lock(intel_dp);
15e899a0
VS
1741 if (!intel_dp->want_panel_vdd)
1742 edp_panel_vdd_off_sync(intel_dp);
773538e8 1743 pps_unlock(intel_dp);
bd943159
KP
1744}
1745
aba86890
ID
1746static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1747{
1748 unsigned long delay;
1749
1750 /*
1751 * Queue the timer to fire a long time from now (relative to the power
1752 * down delay) to keep the panel power up across a sequence of
1753 * operations.
1754 */
1755 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1756 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1757}
1758
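/*
 * Worked example (illustrative): with a hypothetical panel_power_cycle_delay
 * of 500 ms, the delayed work scheduled above fires roughly 2.5 seconds after
 * the last VDD release, so a burst of AUX accesses can share one VDD-on window.
 */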
951468f3
VS
1759/*
1760 * Must be paired with edp_panel_vdd_on().
1761 * Must hold pps_mutex around the whole on/off sequence.
1762 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763 */
4be73780 1764static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1765{
e39b999a
VS
1766 struct drm_i915_private *dev_priv =
1767 intel_dp_to_dev(intel_dp)->dev_private;
1768
1769 lockdep_assert_held(&dev_priv->pps_mutex);
1770
97af61f5
KP
1771 if (!is_edp(intel_dp))
1772 return;
5d613501 1773
e2c719b7 1774 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1775 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1776
bd943159
KP
1777 intel_dp->want_panel_vdd = false;
1778
aba86890 1779 if (sync)
4be73780 1780 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1781 else
1782 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1783}
1784
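/*
 * Illustrative sketch (not part of the driver): the pairing and locking
 * contract of the lock-held helpers above. example_force_vdd_briefly() is a
 * hypothetical caller; both calls sit inside one pps_lock section, and the
 * sync=false release defers the actual VDD disable to panel_vdd_work.
 */
static void example_force_vdd_briefly(struct intel_dp *intel_dp)
{
	pps_lock(intel_dp);
	edp_panel_vdd_on(intel_dp);	/* force VDD, waits for a power cycle if needed */
	/* ... touch registers that only need VDD, not full panel power ... */
	edp_panel_vdd_off(intel_dp, false);	/* queue the delayed VDD off */
	pps_unlock(intel_dp);
}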
9f0fb5be 1785static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1786{
30add22d 1787 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1788 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1789 u32 pp;
453c5420 1790 u32 pp_ctrl_reg;
9934c132 1791
9f0fb5be
VS
1792 lockdep_assert_held(&dev_priv->pps_mutex);
1793
97af61f5 1794 if (!is_edp(intel_dp))
bd943159 1795 return;
99ea7127 1796
3936fcf4
VS
1797 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1798 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1799
e7a89ace
VS
1800 if (WARN(edp_have_panel_power(intel_dp),
1801 "eDP port %c panel power already on\n",
1802 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1803 return;
9934c132 1804
4be73780 1805 wait_panel_power_cycle(intel_dp);
37c6c9b0 1806
bf13e81b 1807 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1808 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1809 if (IS_GEN5(dev)) {
1810 /* ILK workaround: disable reset around power sequence */
1811 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1812 I915_WRITE(pp_ctrl_reg, pp);
1813 POSTING_READ(pp_ctrl_reg);
05ce1a49 1814 }
37c6c9b0 1815
1c0ae80a 1816 pp |= POWER_TARGET_ON;
99ea7127
KP
1817 if (!IS_GEN5(dev))
1818 pp |= PANEL_POWER_RESET;
1819
453c5420
JB
1820 I915_WRITE(pp_ctrl_reg, pp);
1821 POSTING_READ(pp_ctrl_reg);
9934c132 1822
4be73780 1823 wait_panel_on(intel_dp);
dce56b3c 1824 intel_dp->last_power_on = jiffies;
9934c132 1825
05ce1a49
KP
1826 if (IS_GEN5(dev)) {
1827 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1828 I915_WRITE(pp_ctrl_reg, pp);
1829 POSTING_READ(pp_ctrl_reg);
05ce1a49 1830 }
9f0fb5be 1831}
e39b999a 1832
9f0fb5be
VS
1833void intel_edp_panel_on(struct intel_dp *intel_dp)
1834{
1835 if (!is_edp(intel_dp))
1836 return;
1837
1838 pps_lock(intel_dp);
1839 edp_panel_on(intel_dp);
773538e8 1840 pps_unlock(intel_dp);
9934c132
JB
1841}
1842
9f0fb5be
VS
1843
1844static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1845{
4e6e1a54
ID
1846 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1847 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1848 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1849 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1850 enum intel_display_power_domain power_domain;
99ea7127 1851 u32 pp;
453c5420 1852 u32 pp_ctrl_reg;
9934c132 1853
9f0fb5be
VS
1854 lockdep_assert_held(&dev_priv->pps_mutex);
1855
97af61f5
KP
1856 if (!is_edp(intel_dp))
1857 return;
37c6c9b0 1858
3936fcf4
VS
1859 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1860 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1861
3936fcf4
VS
1862 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1863 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1864
453c5420 1865 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1866 /* We need to switch off panel power _and_ force vdd, for otherwise some
1867 * panels get very unhappy and cease to work. */
b3064154
PJ
1868 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1869 EDP_BLC_ENABLE);
453c5420 1870
bf13e81b 1871 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1872
849e39f5
PZ
1873 intel_dp->want_panel_vdd = false;
1874
453c5420
JB
1875 I915_WRITE(pp_ctrl_reg, pp);
1876 POSTING_READ(pp_ctrl_reg);
9934c132 1877
dce56b3c 1878 intel_dp->last_power_cycle = jiffies;
4be73780 1879 wait_panel_off(intel_dp);
849e39f5
PZ
1880
1881 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1882 power_domain = intel_display_port_power_domain(intel_encoder);
1883 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1884}
e39b999a 1885
9f0fb5be
VS
1886void intel_edp_panel_off(struct intel_dp *intel_dp)
1887{
1888 if (!is_edp(intel_dp))
1889 return;
e39b999a 1890
9f0fb5be
VS
1891 pps_lock(intel_dp);
1892 edp_panel_off(intel_dp);
773538e8 1893 pps_unlock(intel_dp);
9934c132
JB
1894}
1895
1250d107
JN
1896/* Enable backlight in the panel power control. */
1897static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1898{
da63a9f2
PZ
1899 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1900 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1901 struct drm_i915_private *dev_priv = dev->dev_private;
1902 u32 pp;
453c5420 1903 u32 pp_ctrl_reg;
32f9d658 1904
01cb9ea6
JB
1905 /*
1906 * If we enable the backlight right away following a panel power
1907 * on, we may see slight flicker as the panel syncs with the eDP
1908 * link. So delay a bit to make sure the image is solid before
1909 * allowing it to appear.
1910 */
4be73780 1911 wait_backlight_on(intel_dp);
e39b999a 1912
773538e8 1913 pps_lock(intel_dp);
e39b999a 1914
453c5420 1915 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1916 pp |= EDP_BLC_ENABLE;
453c5420 1917
bf13e81b 1918 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1919
1920 I915_WRITE(pp_ctrl_reg, pp);
1921 POSTING_READ(pp_ctrl_reg);
e39b999a 1922
773538e8 1923 pps_unlock(intel_dp);
32f9d658
ZW
1924}
1925
1250d107
JN
1926/* Enable backlight PWM and backlight PP control. */
1927void intel_edp_backlight_on(struct intel_dp *intel_dp)
1928{
1929 if (!is_edp(intel_dp))
1930 return;
1931
1932 DRM_DEBUG_KMS("\n");
1933
1934 intel_panel_enable_backlight(intel_dp->attached_connector);
1935 _intel_edp_backlight_on(intel_dp);
1936}
1937
1938/* Disable backlight in the panel power control. */
1939static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1940{
30add22d 1941 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1942 struct drm_i915_private *dev_priv = dev->dev_private;
1943 u32 pp;
453c5420 1944 u32 pp_ctrl_reg;
32f9d658 1945
f01eca2e
KP
1946 if (!is_edp(intel_dp))
1947 return;
1948
773538e8 1949 pps_lock(intel_dp);
e39b999a 1950
453c5420 1951 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1952 pp &= ~EDP_BLC_ENABLE;
453c5420 1953
bf13e81b 1954 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1955
1956 I915_WRITE(pp_ctrl_reg, pp);
1957 POSTING_READ(pp_ctrl_reg);
f7d2323c 1958
773538e8 1959 pps_unlock(intel_dp);
e39b999a
VS
1960
1961 intel_dp->last_backlight_off = jiffies;
f7d2323c 1962 edp_wait_backlight_off(intel_dp);
1250d107 1963}
f7d2323c 1964
1250d107
JN
1965/* Disable backlight PP control and backlight PWM. */
1966void intel_edp_backlight_off(struct intel_dp *intel_dp)
1967{
1968 if (!is_edp(intel_dp))
1969 return;
1970
1971 DRM_DEBUG_KMS("\n");
f7d2323c 1972
1250d107 1973 _intel_edp_backlight_off(intel_dp);
f7d2323c 1974 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1975}
a4fc5ed6 1976
73580fb7
JN
1977/*
1978 * Hook for controlling the panel power control backlight through the bl_power
1979 * sysfs attribute. Take care to handle multiple calls.
1980 */
1981static void intel_edp_backlight_power(struct intel_connector *connector,
1982 bool enable)
1983{
1984 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1985 bool is_enabled;
1986
773538e8 1987 pps_lock(intel_dp);
e39b999a 1988 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1989 pps_unlock(intel_dp);
73580fb7
JN
1990
1991 if (is_enabled == enable)
1992 return;
1993
23ba9373
JN
1994 DRM_DEBUG_KMS("panel power control backlight %s\n",
1995 enable ? "enable" : "disable");
73580fb7
JN
1996
1997 if (enable)
1998 _intel_edp_backlight_on(intel_dp);
1999 else
2000 _intel_edp_backlight_off(intel_dp);
2001}
2002
2bd2ad64 2003static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2004{
da63a9f2
PZ
2005 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2006 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2007 struct drm_device *dev = crtc->dev;
d240f20f
JB
2008 struct drm_i915_private *dev_priv = dev->dev_private;
2009 u32 dpa_ctl;
2010
2bd2ad64
DV
2011 assert_pipe_disabled(dev_priv,
2012 to_intel_crtc(crtc)->pipe);
2013
d240f20f
JB
2014 DRM_DEBUG_KMS("\n");
2015 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2016 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2017 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2018
2019 /* We don't adjust intel_dp->DP while tearing down the link, to
2020 * facilitate link retraining (e.g. after hotplug). Hence clear all
2021 * enable bits here to ensure that we don't enable too much. */
2022 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2023 intel_dp->DP |= DP_PLL_ENABLE;
2024 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2025 POSTING_READ(DP_A);
2026 udelay(200);
d240f20f
JB
2027}
2028
2bd2ad64 2029static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2030{
da63a9f2
PZ
2031 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2032 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2033 struct drm_device *dev = crtc->dev;
d240f20f
JB
2034 struct drm_i915_private *dev_priv = dev->dev_private;
2035 u32 dpa_ctl;
2036
2bd2ad64
DV
2037 assert_pipe_disabled(dev_priv,
2038 to_intel_crtc(crtc)->pipe);
2039
d240f20f 2040 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2041 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2042 "dp pll off, should be on\n");
2043 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2044
2045 /* We can't rely on the value tracked for the DP register in
2046 * intel_dp->DP because link_down must not change that (otherwise link
 2047	 * re-training will fail). */
298b0b39 2048 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2049 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2050 POSTING_READ(DP_A);
d240f20f
JB
2051 udelay(200);
2052}
2053
c7ad3810 2054/* If the sink supports it, try to set the power state appropriately */
c19b0669 2055void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2056{
2057 int ret, i;
2058
2059 /* Should have a valid DPCD by this point */
2060 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2061 return;
2062
2063 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2064 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2065 DP_SET_POWER_D3);
c7ad3810
JB
2066 } else {
2067 /*
 2068		 * When turning on, we may need to retry a few times, 1 ms apart,
 2069		 * to give the sink time to wake up.
2070 */
2071 for (i = 0; i < 3; i++) {
9d1a1031
JN
2072 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2073 DP_SET_POWER_D0);
c7ad3810
JB
2074 if (ret == 1)
2075 break;
2076 msleep(1);
2077 }
2078 }
f9cac721
JN
2079
2080 if (ret != 1)
2081 DRM_DEBUG_KMS("failed to %s sink power state\n",
2082 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2083}
2084
19d8fe15
DV
2085static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2086 enum pipe *pipe)
d240f20f 2087{
19d8fe15 2088 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2089 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2090 struct drm_device *dev = encoder->base.dev;
2091 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2092 enum intel_display_power_domain power_domain;
2093 u32 tmp;
2094
2095 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2096 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2097 return false;
2098
2099 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2100
2101 if (!(tmp & DP_PORT_EN))
2102 return false;
2103
bc7d38a4 2104 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2105 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2106 } else if (IS_CHERRYVIEW(dev)) {
2107 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2108 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2109 *pipe = PORT_TO_PIPE(tmp);
2110 } else {
2111 u32 trans_sel;
2112 u32 trans_dp;
2113 int i;
2114
2115 switch (intel_dp->output_reg) {
2116 case PCH_DP_B:
2117 trans_sel = TRANS_DP_PORT_SEL_B;
2118 break;
2119 case PCH_DP_C:
2120 trans_sel = TRANS_DP_PORT_SEL_C;
2121 break;
2122 case PCH_DP_D:
2123 trans_sel = TRANS_DP_PORT_SEL_D;
2124 break;
2125 default:
2126 return true;
2127 }
2128
055e393f 2129 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2130 trans_dp = I915_READ(TRANS_DP_CTL(i));
2131 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2132 *pipe = i;
2133 return true;
2134 }
2135 }
19d8fe15 2136
4a0833ec
DV
2137 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2138 intel_dp->output_reg);
2139 }
d240f20f 2140
19d8fe15
DV
2141 return true;
2142}
d240f20f 2143
045ac3b5 2144static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2145 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2146{
2147 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2148 u32 tmp, flags = 0;
63000ef6
XZ
2149 struct drm_device *dev = encoder->base.dev;
2150 struct drm_i915_private *dev_priv = dev->dev_private;
2151 enum port port = dp_to_dig_port(intel_dp)->port;
2152 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2153 int dotclock;
045ac3b5 2154
9ed109a7
DV
2155 tmp = I915_READ(intel_dp->output_reg);
2156 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2157 pipe_config->has_audio = true;
2158
63000ef6 2159 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2160 if (tmp & DP_SYNC_HS_HIGH)
2161 flags |= DRM_MODE_FLAG_PHSYNC;
2162 else
2163 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2164
63000ef6
XZ
2165 if (tmp & DP_SYNC_VS_HIGH)
2166 flags |= DRM_MODE_FLAG_PVSYNC;
2167 else
2168 flags |= DRM_MODE_FLAG_NVSYNC;
2169 } else {
2170 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2171 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2172 flags |= DRM_MODE_FLAG_PHSYNC;
2173 else
2174 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2175
63000ef6
XZ
2176 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2177 flags |= DRM_MODE_FLAG_PVSYNC;
2178 else
2179 flags |= DRM_MODE_FLAG_NVSYNC;
2180 }
045ac3b5 2181
2d112de7 2182 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2183
8c875fca
VS
2184 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2185 tmp & DP_COLOR_RANGE_16_235)
2186 pipe_config->limited_color_range = true;
2187
eb14cb74
VS
2188 pipe_config->has_dp_encoder = true;
2189
2190 intel_dp_get_m_n(crtc, pipe_config);
2191
18442d08 2192 if (port == PORT_A) {
f1f644dc
JB
2193 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2194 pipe_config->port_clock = 162000;
2195 else
2196 pipe_config->port_clock = 270000;
2197 }
18442d08
VS
2198
2199 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2200 &pipe_config->dp_m_n);
2201
2202 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2203 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2204
2d112de7 2205 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2206
c6cd2ee2
JN
2207 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2208 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2209 /*
2210 * This is a big fat ugly hack.
2211 *
2212 * Some machines in UEFI boot mode provide us a VBT that has 18
2213 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2214 * unknown we fail to light up. Yet the same BIOS boots up with
2215 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2216 * max, not what it tells us to use.
2217 *
2218 * Note: This will still be broken if the eDP panel is not lit
2219 * up by the BIOS, and thus we can't get the mode at module
2220 * load.
2221 */
2222 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2223 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2224 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2225 }
045ac3b5
JB
2226}
2227
e8cb4558 2228static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2229{
e8cb4558 2230 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2231 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2232 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2233
6e3c9717 2234 if (crtc->config->has_audio)
495a5bb8 2235 intel_audio_codec_disable(encoder);
6cb49835 2236
b32c6f48
RV
2237 if (HAS_PSR(dev) && !HAS_DDI(dev))
2238 intel_psr_disable(intel_dp);
2239
6cb49835
DV
2240 /* Make sure the panel is off before trying to change the mode. But also
2241 * ensure that we have vdd while we switch off the panel. */
24f3e092 2242 intel_edp_panel_vdd_on(intel_dp);
4be73780 2243 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2244 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2245 intel_edp_panel_off(intel_dp);
3739850b 2246
08aff3fe
VS
2247 /* disable the port before the pipe on g4x */
2248 if (INTEL_INFO(dev)->gen < 5)
3739850b 2249 intel_dp_link_down(intel_dp);
d240f20f
JB
2250}
2251
08aff3fe 2252static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2253{
2bd2ad64 2254 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2255 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2256
49277c31 2257 intel_dp_link_down(intel_dp);
08aff3fe
VS
2258 if (port == PORT_A)
2259 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2260}
2261
2262static void vlv_post_disable_dp(struct intel_encoder *encoder)
2263{
2264 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2265
2266 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2267}
2268
580d3811
VS
2269static void chv_post_disable_dp(struct intel_encoder *encoder)
2270{
2271 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2272 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2273 struct drm_device *dev = encoder->base.dev;
2274 struct drm_i915_private *dev_priv = dev->dev_private;
2275 struct intel_crtc *intel_crtc =
2276 to_intel_crtc(encoder->base.crtc);
2277 enum dpio_channel ch = vlv_dport_to_channel(dport);
2278 enum pipe pipe = intel_crtc->pipe;
2279 u32 val;
2280
2281 intel_dp_link_down(intel_dp);
2282
2283 mutex_lock(&dev_priv->dpio_lock);
2284
2285 /* Propagate soft reset to data lane reset */
97fd4d5c 2286 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2287 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2288 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2289
97fd4d5c
VS
2290 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2291 val |= CHV_PCS_REQ_SOFTRESET_EN;
2292 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2293
2294 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2295 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2296 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2297
2298 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2299 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2300 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2301
2302 mutex_unlock(&dev_priv->dpio_lock);
2303}
2304
7b13b58a
VS
2305static void
2306_intel_dp_set_link_train(struct intel_dp *intel_dp,
2307 uint32_t *DP,
2308 uint8_t dp_train_pat)
2309{
2310 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2311 struct drm_device *dev = intel_dig_port->base.base.dev;
2312 struct drm_i915_private *dev_priv = dev->dev_private;
2313 enum port port = intel_dig_port->port;
2314
2315 if (HAS_DDI(dev)) {
2316 uint32_t temp = I915_READ(DP_TP_CTL(port));
2317
2318 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2319 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2320 else
2321 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2322
2323 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2324 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2325 case DP_TRAINING_PATTERN_DISABLE:
2326 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2327
2328 break;
2329 case DP_TRAINING_PATTERN_1:
2330 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2331 break;
2332 case DP_TRAINING_PATTERN_2:
2333 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2334 break;
2335 case DP_TRAINING_PATTERN_3:
2336 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2337 break;
2338 }
2339 I915_WRITE(DP_TP_CTL(port), temp);
2340
2341 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2342 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2343
2344 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2345 case DP_TRAINING_PATTERN_DISABLE:
2346 *DP |= DP_LINK_TRAIN_OFF_CPT;
2347 break;
2348 case DP_TRAINING_PATTERN_1:
2349 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2350 break;
2351 case DP_TRAINING_PATTERN_2:
2352 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2353 break;
2354 case DP_TRAINING_PATTERN_3:
2355 DRM_ERROR("DP training pattern 3 not supported\n");
2356 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2357 break;
2358 }
2359
2360 } else {
2361 if (IS_CHERRYVIEW(dev))
2362 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2363 else
2364 *DP &= ~DP_LINK_TRAIN_MASK;
2365
2366 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2367 case DP_TRAINING_PATTERN_DISABLE:
2368 *DP |= DP_LINK_TRAIN_OFF;
2369 break;
2370 case DP_TRAINING_PATTERN_1:
2371 *DP |= DP_LINK_TRAIN_PAT_1;
2372 break;
2373 case DP_TRAINING_PATTERN_2:
2374 *DP |= DP_LINK_TRAIN_PAT_2;
2375 break;
2376 case DP_TRAINING_PATTERN_3:
2377 if (IS_CHERRYVIEW(dev)) {
2378 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2379 } else {
2380 DRM_ERROR("DP training pattern 3 not supported\n");
2381 *DP |= DP_LINK_TRAIN_PAT_2;
2382 }
2383 break;
2384 }
2385 }
2386}
2387
2388static void intel_dp_enable_port(struct intel_dp *intel_dp)
2389{
2390 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2391 struct drm_i915_private *dev_priv = dev->dev_private;
2392
7b13b58a
VS
2393 /* enable with pattern 1 (as per spec) */
2394 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2395 DP_TRAINING_PATTERN_1);
2396
2397 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2398 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2399
2400 /*
2401 * Magic for VLV/CHV. We _must_ first set up the register
2402 * without actually enabling the port, and then do another
2403 * write to enable the port. Otherwise link training will
2404 * fail when the power sequencer is freshly used for this port.
2405 */
2406 intel_dp->DP |= DP_PORT_EN;
2407
2408 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2409 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2410}
2411
e8cb4558 2412static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2413{
e8cb4558
DV
2414 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2415 struct drm_device *dev = encoder->base.dev;
2416 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2417 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2418 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2419
0c33d8d7
DV
2420 if (WARN_ON(dp_reg & DP_PORT_EN))
2421 return;
5d613501 2422
093e3f13
VS
2423 pps_lock(intel_dp);
2424
2425 if (IS_VALLEYVIEW(dev))
2426 vlv_init_panel_power_sequencer(intel_dp);
2427
7b13b58a 2428 intel_dp_enable_port(intel_dp);
093e3f13
VS
2429
2430 edp_panel_vdd_on(intel_dp);
2431 edp_panel_on(intel_dp);
2432 edp_panel_vdd_off(intel_dp, true);
2433
2434 pps_unlock(intel_dp);
2435
61234fa5
VS
2436 if (IS_VALLEYVIEW(dev))
2437 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2438
f01eca2e 2439 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2440 intel_dp_start_link_train(intel_dp);
33a34e4e 2441 intel_dp_complete_link_train(intel_dp);
3ab9c637 2442 intel_dp_stop_link_train(intel_dp);
c1dec79a 2443
6e3c9717 2444 if (crtc->config->has_audio) {
c1dec79a
JN
2445 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2446 pipe_name(crtc->pipe));
2447 intel_audio_codec_enable(encoder);
2448 }
ab1f90f9 2449}
89b667f8 2450
ecff4f3b
JN
2451static void g4x_enable_dp(struct intel_encoder *encoder)
2452{
828f5c6e
JN
2453 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2454
ecff4f3b 2455 intel_enable_dp(encoder);
4be73780 2456 intel_edp_backlight_on(intel_dp);
ab1f90f9 2457}
89b667f8 2458
ab1f90f9
JN
2459static void vlv_enable_dp(struct intel_encoder *encoder)
2460{
828f5c6e
JN
2461 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2462
4be73780 2463 intel_edp_backlight_on(intel_dp);
b32c6f48 2464 intel_psr_enable(intel_dp);
d240f20f
JB
2465}
2466
ecff4f3b 2467static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2468{
2469 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2470 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2471
8ac33ed3
DV
2472 intel_dp_prepare(encoder);
2473
d41f1efb
DV
2474 /* Only ilk+ has port A */
2475 if (dport->port == PORT_A) {
2476 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2477 ironlake_edp_pll_on(intel_dp);
d41f1efb 2478 }
ab1f90f9
JN
2479}
2480
83b84597
VS
2481static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2482{
2483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2484 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2485 enum pipe pipe = intel_dp->pps_pipe;
2486 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2487
2488 edp_panel_vdd_off_sync(intel_dp);
2489
2490 /*
 2491	 * VLV seems to get confused when multiple power sequencers
 2492	 * have the same port selected (even if only one has power/vdd
 2493	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2494	 * CHV, on the other hand, doesn't seem to mind having the same port
 2495	 * selected in multiple power sequencers, but let's always clear the
 2496	 * port select when logically disconnecting a power sequencer
 2497	 * from a port.
2498 */
2499 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2500 pipe_name(pipe), port_name(intel_dig_port->port));
2501 I915_WRITE(pp_on_reg, 0);
2502 POSTING_READ(pp_on_reg);
2503
2504 intel_dp->pps_pipe = INVALID_PIPE;
2505}
2506
a4a5d2f8
VS
2507static void vlv_steal_power_sequencer(struct drm_device *dev,
2508 enum pipe pipe)
2509{
2510 struct drm_i915_private *dev_priv = dev->dev_private;
2511 struct intel_encoder *encoder;
2512
2513 lockdep_assert_held(&dev_priv->pps_mutex);
2514
ac3c12e4
VS
2515 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2516 return;
2517
a4a5d2f8
VS
2518 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2519 base.head) {
2520 struct intel_dp *intel_dp;
773538e8 2521 enum port port;
a4a5d2f8
VS
2522
2523 if (encoder->type != INTEL_OUTPUT_EDP)
2524 continue;
2525
2526 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2527 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2528
2529 if (intel_dp->pps_pipe != pipe)
2530 continue;
2531
2532 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2533 pipe_name(pipe), port_name(port));
a4a5d2f8 2534
034e43c6
VS
2535 WARN(encoder->connectors_active,
2536 "stealing pipe %c power sequencer from active eDP port %c\n",
2537 pipe_name(pipe), port_name(port));
a4a5d2f8 2538
a4a5d2f8 2539 /* make sure vdd is off before we steal it */
83b84597 2540 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2541 }
2542}
2543
2544static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2545{
2546 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2547 struct intel_encoder *encoder = &intel_dig_port->base;
2548 struct drm_device *dev = encoder->base.dev;
2549 struct drm_i915_private *dev_priv = dev->dev_private;
2550 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2551
2552 lockdep_assert_held(&dev_priv->pps_mutex);
2553
093e3f13
VS
2554 if (!is_edp(intel_dp))
2555 return;
2556
a4a5d2f8
VS
2557 if (intel_dp->pps_pipe == crtc->pipe)
2558 return;
2559
2560 /*
2561 * If another power sequencer was being used on this
 2562	 * port previously, make sure to turn off vdd there while
2563 * we still have control of it.
2564 */
2565 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2566 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2567
2568 /*
2569 * We may be stealing the power
2570 * sequencer from another port.
2571 */
2572 vlv_steal_power_sequencer(dev, crtc->pipe);
2573
2574 /* now it's all ours */
2575 intel_dp->pps_pipe = crtc->pipe;
2576
2577 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2578 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2579
2580 /* init power sequencer on this pipe and port */
36b5f425
VS
2581 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2582 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2583}
2584
ab1f90f9 2585static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2586{
2bd2ad64 2587 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2588 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2589 struct drm_device *dev = encoder->base.dev;
89b667f8 2590 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2591 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2592 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2593 int pipe = intel_crtc->pipe;
2594 u32 val;
a4fc5ed6 2595
ab1f90f9 2596 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2597
ab3c759a 2598 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2599 val = 0;
2600 if (pipe)
2601 val |= (1<<21);
2602 else
2603 val &= ~(1<<21);
2604 val |= 0x001000c4;
ab3c759a
CML
2605 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2606 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2607 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2608
ab1f90f9
JN
2609 mutex_unlock(&dev_priv->dpio_lock);
2610
2611 intel_enable_dp(encoder);
89b667f8
JB
2612}
2613
ecff4f3b 2614static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2615{
2616 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2617 struct drm_device *dev = encoder->base.dev;
2618 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2619 struct intel_crtc *intel_crtc =
2620 to_intel_crtc(encoder->base.crtc);
e4607fcf 2621 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2622 int pipe = intel_crtc->pipe;
89b667f8 2623
8ac33ed3
DV
2624 intel_dp_prepare(encoder);
2625
89b667f8 2626 /* Program Tx lane resets to default */
0980a60f 2627 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2628 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2629 DPIO_PCS_TX_LANE2_RESET |
2630 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2631 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2632 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2633 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2634 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2635 DPIO_PCS_CLK_SOFT_RESET);
2636
2637 /* Fix up inter-pair skew failure */
ab3c759a
CML
2638 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2639 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2640 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2641 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2642}
2643
e4a1d846
CML
2644static void chv_pre_enable_dp(struct intel_encoder *encoder)
2645{
2646 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2647 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2648 struct drm_device *dev = encoder->base.dev;
2649 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2650 struct intel_crtc *intel_crtc =
2651 to_intel_crtc(encoder->base.crtc);
2652 enum dpio_channel ch = vlv_dport_to_channel(dport);
2653 int pipe = intel_crtc->pipe;
2654 int data, i;
949c1d43 2655 u32 val;
e4a1d846 2656
e4a1d846 2657 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2658
570e2a74
VS
2659 /* allow hardware to manage TX FIFO reset source */
2660 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2661 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2662 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2663
2664 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2665 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2666 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2667
949c1d43 2668 /* Deassert soft data lane reset*/
97fd4d5c 2669 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2670 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2671 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2672
2673 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2674 val |= CHV_PCS_REQ_SOFTRESET_EN;
2675 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2676
2677 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2678 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2679 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2680
97fd4d5c 2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2682 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2684
2685 /* Program Tx lane latency optimal setting*/
e4a1d846
CML
2686 for (i = 0; i < 4; i++) {
2687 /* Set the latency optimal bit */
2688 data = (i == 1) ? 0x0 : 0x6;
2689 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2690 data << DPIO_FRC_LATENCY_SHFIT);
2691
2692 /* Set the upar bit */
2693 data = (i == 1) ? 0x0 : 0x1;
2694 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2695 data << DPIO_UPAR_SHIFT);
2696 }
2697
2698 /* Data lane stagger programming */
2699 /* FIXME: Fix up value only after power analysis */
2700
2701 mutex_unlock(&dev_priv->dpio_lock);
2702
e4a1d846 2703 intel_enable_dp(encoder);
e4a1d846
CML
2704}
2705
9197c88b
VS
2706static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2707{
2708 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2709 struct drm_device *dev = encoder->base.dev;
2710 struct drm_i915_private *dev_priv = dev->dev_private;
2711 struct intel_crtc *intel_crtc =
2712 to_intel_crtc(encoder->base.crtc);
2713 enum dpio_channel ch = vlv_dport_to_channel(dport);
2714 enum pipe pipe = intel_crtc->pipe;
2715 u32 val;
2716
625695f8
VS
2717 intel_dp_prepare(encoder);
2718
9197c88b
VS
2719 mutex_lock(&dev_priv->dpio_lock);
2720
b9e5ac3c
VS
2721 /* program left/right clock distribution */
2722 if (pipe != PIPE_B) {
2723 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2724 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2725 if (ch == DPIO_CH0)
2726 val |= CHV_BUFLEFTENA1_FORCE;
2727 if (ch == DPIO_CH1)
2728 val |= CHV_BUFRIGHTENA1_FORCE;
2729 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2730 } else {
2731 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2732 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2733 if (ch == DPIO_CH0)
2734 val |= CHV_BUFLEFTENA2_FORCE;
2735 if (ch == DPIO_CH1)
2736 val |= CHV_BUFRIGHTENA2_FORCE;
2737 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2738 }
2739
9197c88b
VS
2740 /* program clock channel usage */
2741 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2742 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2743 if (pipe != PIPE_B)
2744 val &= ~CHV_PCS_USEDCLKCHANNEL;
2745 else
2746 val |= CHV_PCS_USEDCLKCHANNEL;
2747 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2748
2749 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2750 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2751 if (pipe != PIPE_B)
2752 val &= ~CHV_PCS_USEDCLKCHANNEL;
2753 else
2754 val |= CHV_PCS_USEDCLKCHANNEL;
2755 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2756
2757 /*
 2758	 * This is a bit weird since generally CL
2759 * matches the pipe, but here we need to
2760 * pick the CL based on the port.
2761 */
2762 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2763 if (pipe != PIPE_B)
2764 val &= ~CHV_CMN_USEDCLKCHANNEL;
2765 else
2766 val |= CHV_CMN_USEDCLKCHANNEL;
2767 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2768
2769 mutex_unlock(&dev_priv->dpio_lock);
2770}
2771
a4fc5ed6 2772/*
df0c237d
JB
2773 * Native read with retry for link status and receiver capability reads for
2774 * cases where the sink may still be asleep.
9d1a1031
JN
2775 *
2776 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2777 * supposed to retry 3 times per the spec.
a4fc5ed6 2778 */
9d1a1031
JN
2779static ssize_t
2780intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2781 void *buffer, size_t size)
a4fc5ed6 2782{
9d1a1031
JN
2783 ssize_t ret;
2784 int i;
61da5fab 2785
f6a19066
VS
2786 /*
 2787	 * Sometimes we just get the same incorrect byte repeated
 2788	 * over the entire buffer. Doing just one throw-away read
2789 * initially seems to "solve" it.
2790 */
2791 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2792
61da5fab 2793 for (i = 0; i < 3; i++) {
9d1a1031
JN
2794 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2795 if (ret == size)
2796 return ret;
61da5fab
JB
2797 msleep(1);
2798 }
a4fc5ed6 2799
9d1a1031 2800 return ret;
a4fc5ed6
KP
2801}
2802
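/*
 * Illustrative sketch (not part of the driver): typical use of the retrying
 * read above. example_refresh_dpcd() is hypothetical; it refreshes the cached
 * receiver capabilities from a sink that may still be waking up.
 */
static bool example_refresh_dpcd(struct intel_dp *intel_dp)
{
	/* Up to 3 attempts, 1 ms apart, while the sink comes out of sleep. */
	return intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
				       intel_dp->dpcd,
				       sizeof(intel_dp->dpcd)) ==
		sizeof(intel_dp->dpcd);
}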
2803/*
2804 * Fetch AUX CH registers 0x202 - 0x207 which contain
2805 * link status information
2806 */
2807static bool
93f62dad 2808intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2809{
9d1a1031
JN
2810 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2811 DP_LANE0_1_STATUS,
2812 link_status,
2813 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2814}
2815
1100244e 2816/* These are source-specific values. */
a4fc5ed6 2817static uint8_t
1a2eb460 2818intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2819{
30add22d 2820 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2821 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2822 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2823
7ad14a29
SJ
2824 if (INTEL_INFO(dev)->gen >= 9) {
2825 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2826 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2827 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2828 } else if (IS_VALLEYVIEW(dev))
bd60018a 2829 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2830 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2831 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2832 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2833 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2834 else
bd60018a 2835 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2836}
2837
2838static uint8_t
2839intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2840{
30add22d 2841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2842 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2843
5a9d1f1a
DL
2844 if (INTEL_INFO(dev)->gen >= 9) {
2845 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2846 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2847 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2848 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2849 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2851 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2853 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2854 default:
2855 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2856 }
2857 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2858 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2860 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2862 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2864 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2866 default:
bd60018a 2867 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2868 }
e2fa6fba
P
2869 } else if (IS_VALLEYVIEW(dev)) {
2870 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2872 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2874 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2876 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2878 default:
bd60018a 2879 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2880 }
bc7d38a4 2881 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2882 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2884 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2886 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2887 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2888 default:
bd60018a 2889 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2890 }
2891 } else {
2892 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2893 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2894 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2896 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2898 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2900 default:
bd60018a 2901 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2902 }
a4fc5ed6
KP
2903 }
2904}
2905
e2fa6fba
P
2906static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2907{
2908 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2909 struct drm_i915_private *dev_priv = dev->dev_private;
2910 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2911 struct intel_crtc *intel_crtc =
2912 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2913 unsigned long demph_reg_value, preemph_reg_value,
2914 uniqtranscale_reg_value;
2915 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2916 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2917 int pipe = intel_crtc->pipe;
e2fa6fba
P
2918
2919 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2920 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2921 preemph_reg_value = 0x0004000;
2922 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2924 demph_reg_value = 0x2B405555;
2925 uniqtranscale_reg_value = 0x552AB83A;
2926 break;
bd60018a 2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2928 demph_reg_value = 0x2B404040;
2929 uniqtranscale_reg_value = 0x5548B83A;
2930 break;
bd60018a 2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2932 demph_reg_value = 0x2B245555;
2933 uniqtranscale_reg_value = 0x5560B83A;
2934 break;
bd60018a 2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2936 demph_reg_value = 0x2B405555;
2937 uniqtranscale_reg_value = 0x5598DA3A;
2938 break;
2939 default:
2940 return 0;
2941 }
2942 break;
bd60018a 2943 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2944 preemph_reg_value = 0x0002000;
2945 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2946 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2947 demph_reg_value = 0x2B404040;
2948 uniqtranscale_reg_value = 0x5552B83A;
2949 break;
bd60018a 2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2951 demph_reg_value = 0x2B404848;
2952 uniqtranscale_reg_value = 0x5580B83A;
2953 break;
bd60018a 2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2955 demph_reg_value = 0x2B404040;
2956 uniqtranscale_reg_value = 0x55ADDA3A;
2957 break;
2958 default:
2959 return 0;
2960 }
2961 break;
bd60018a 2962 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2963 preemph_reg_value = 0x0000000;
2964 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2965 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2966 demph_reg_value = 0x2B305555;
2967 uniqtranscale_reg_value = 0x5570B83A;
2968 break;
bd60018a 2969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2970 demph_reg_value = 0x2B2B4040;
2971 uniqtranscale_reg_value = 0x55ADDA3A;
2972 break;
2973 default:
2974 return 0;
2975 }
2976 break;
bd60018a 2977 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2978 preemph_reg_value = 0x0006000;
2979 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2981 demph_reg_value = 0x1B405555;
2982 uniqtranscale_reg_value = 0x55ADDA3A;
2983 break;
2984 default:
2985 return 0;
2986 }
2987 break;
2988 default:
2989 return 0;
2990 }
2991
0980a60f 2992 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2993 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2994 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2995 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2996 uniqtranscale_reg_value);
ab3c759a
CML
2997 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2998 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3000 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3001 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3002
3003 return 0;
3004}
3005
e4a1d846
CML
3006static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3007{
3008 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3009 struct drm_i915_private *dev_priv = dev->dev_private;
3010 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3011 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3012 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3013 uint8_t train_set = intel_dp->train_set[0];
3014 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3015 enum pipe pipe = intel_crtc->pipe;
3016 int i;
e4a1d846
CML
3017
3018 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3019 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3022 deemph_reg_value = 128;
3023 margin_reg_value = 52;
3024 break;
bd60018a 3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3026 deemph_reg_value = 128;
3027 margin_reg_value = 77;
3028 break;
bd60018a 3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3030 deemph_reg_value = 128;
3031 margin_reg_value = 102;
3032 break;
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3034 deemph_reg_value = 128;
3035 margin_reg_value = 154;
3036 /* FIXME extra to set for 1200 */
3037 break;
3038 default:
3039 return 0;
3040 }
3041 break;
bd60018a 3042 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3043 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3045 deemph_reg_value = 85;
3046 margin_reg_value = 78;
3047 break;
bd60018a 3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3049 deemph_reg_value = 85;
3050 margin_reg_value = 116;
3051 break;
bd60018a 3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3053 deemph_reg_value = 85;
3054 margin_reg_value = 154;
3055 break;
3056 default:
3057 return 0;
3058 }
3059 break;
bd60018a 3060 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3061 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3063 deemph_reg_value = 64;
3064 margin_reg_value = 104;
3065 break;
bd60018a 3066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3067 deemph_reg_value = 64;
3068 margin_reg_value = 154;
3069 break;
3070 default:
3071 return 0;
3072 }
3073 break;
bd60018a 3074 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3075 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3077 deemph_reg_value = 43;
3078 margin_reg_value = 154;
3079 break;
3080 default:
3081 return 0;
3082 }
3083 break;
3084 default:
3085 return 0;
3086 }
3087
3088 mutex_lock(&dev_priv->dpio_lock);
3089
3090 /* Clear calc init */
1966e59e
VS
3091 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3092 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3093 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3094 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3095 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3096
3097 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3098 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3099 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3100 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3101 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3102
a02ef3c7
VS
3103 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3104 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3105 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3106 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3107
3108 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3109 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3110 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3111 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3112
e4a1d846 3113 /* Program swing deemph */
f72df8db
VS
3114 for (i = 0; i < 4; i++) {
3115 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3116 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3117 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3118 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3119 }
e4a1d846
CML
3120
3121 /* Program swing margin */
f72df8db
VS
3122 for (i = 0; i < 4; i++) {
3123 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3124 val &= ~DPIO_SWING_MARGIN000_MASK;
3125 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3126 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3127 }
e4a1d846
CML
3128
3129 /* Disable unique transition scale */
f72df8db
VS
3130 for (i = 0; i < 4; i++) {
3131 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3132 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3133 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3134 }
e4a1d846
CML
3135
3136 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3137 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3138 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3139 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3140
3141 /*
3142 * The document said it needs to set bit 27 for ch0 and bit 26
3143 * for ch1. Might be a typo in the doc.
3144 * For now, for this unique transition scale selection, set bit
3145 * 27 for ch0 and ch1.
3146 */
f72df8db
VS
3147 for (i = 0; i < 4; i++) {
3148 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3149 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3150 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3151 }
e4a1d846 3152
f72df8db
VS
3153 for (i = 0; i < 4; i++) {
3154 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3155 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3156 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3157 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3158 }
e4a1d846
CML
3159 }
3160
3161 /* Start swing calculation */
1966e59e
VS
3162 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3163 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3165
3166 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3167 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3168 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3169
3170 /* LRC Bypass */
3171 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3172 val |= DPIO_LRC_BYPASS;
3173 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3174
3175 mutex_unlock(&dev_priv->dpio_lock);
3176
3177 return 0;
3178}
3179
a4fc5ed6 3180static void
0301b3ac
JN
3181intel_get_adjust_train(struct intel_dp *intel_dp,
3182 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3183{
3184 uint8_t v = 0;
3185 uint8_t p = 0;
3186 int lane;
1a2eb460
KP
3187 uint8_t voltage_max;
3188 uint8_t preemph_max;
a4fc5ed6 3189
33a34e4e 3190 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3191 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3192 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3193
3194 if (this_v > v)
3195 v = this_v;
3196 if (this_p > p)
3197 p = this_p;
3198 }
3199
1a2eb460 3200 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3201 if (v >= voltage_max)
3202 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3203
1a2eb460
KP
3204 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3205 if (p >= preemph_max)
3206 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3207
3208 for (lane = 0; lane < 4; lane++)
33a34e4e 3209 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3210}
3211
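/*
 * Worked example (illustrative) for the helper above: suppose the per-lane
 * adjust requests are voltage swing {2, 3, 2, 2} and pre-emphasis
 * {1, 0, 1, 1}, so the maxima are v = LEVEL_3 and p = LEVEL_1. On a source
 * limited to LEVEL_2 swing with at most LEVEL_1 pre-emphasis at that swing
 * (the gen7 port A case above), both values are clamped and flagged, and
 * every train_set entry becomes
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_MAX_SWING_REACHED |
 * DP_TRAIN_PRE_EMPH_LEVEL_1 | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED.
 */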
3212static uint32_t
f0a3424e 3213intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3214{
3cf2efb1 3215 uint32_t signal_levels = 0;
a4fc5ed6 3216
3cf2efb1 3217 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3218 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3219 default:
3220 signal_levels |= DP_VOLTAGE_0_4;
3221 break;
bd60018a 3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3223 signal_levels |= DP_VOLTAGE_0_6;
3224 break;
bd60018a 3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3226 signal_levels |= DP_VOLTAGE_0_8;
3227 break;
bd60018a 3228 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3229 signal_levels |= DP_VOLTAGE_1_2;
3230 break;
3231 }
3cf2efb1 3232 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3233 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3234 default:
3235 signal_levels |= DP_PRE_EMPHASIS_0;
3236 break;
bd60018a 3237 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3238 signal_levels |= DP_PRE_EMPHASIS_3_5;
3239 break;
bd60018a 3240 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3241 signal_levels |= DP_PRE_EMPHASIS_6;
3242 break;
bd60018a 3243 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3244 signal_levels |= DP_PRE_EMPHASIS_9_5;
3245 break;
3246 }
3247 return signal_levels;
3248}
3249
e3421a18
ZW
3250/* Gen6's DP voltage swing and pre-emphasis control */
3251static uint32_t
3252intel_gen6_edp_signal_levels(uint8_t train_set)
3253{
3c5a62b5
YL
3254 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3255 DP_TRAIN_PRE_EMPHASIS_MASK);
3256 switch (signal_levels) {
bd60018a
SJ
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3259 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3261 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3263 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3264 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3267 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3270 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3271 default:
3c5a62b5
YL
3272 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3273 "0x%x\n", signal_levels);
3274 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3275 }
3276}
3277
1a2eb460
KP
3278/* Gen7's DP voltage swing and pre-emphasis control */
3279static uint32_t
3280intel_gen7_edp_signal_levels(uint8_t train_set)
3281{
3282 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3283 DP_TRAIN_PRE_EMPHASIS_MASK);
3284 switch (signal_levels) {
bd60018a 3285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3286 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3288 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3290 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3291
bd60018a 3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3293 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3295 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3296
bd60018a 3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3298 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3300 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3301
3302 default:
3303 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3304 "0x%x\n", signal_levels);
3305 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3306 }
3307}
3308
d6c0d722
PZ
3309/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3310static uint32_t
f0a3424e 3311intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3312{
d6c0d722
PZ
3313 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3314 DP_TRAIN_PRE_EMPHASIS_MASK);
3315 switch (signal_levels) {
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3317 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3319 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3321 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3323 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3324
bd60018a 3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3326 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3328 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3330 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3331
bd60018a 3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3333 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3335 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3336
3337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3338 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3339 default:
3340 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3341 "0x%x\n", signal_levels);
c5fe6a06 3342 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3343 }
a4fc5ed6
KP
3344}
3345
f0a3424e
PZ
3346/* Properly updates "DP" with the correct signal levels. */
3347static void
3348intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3349{
3350 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3351 enum port port = intel_dig_port->port;
f0a3424e
PZ
3352 struct drm_device *dev = intel_dig_port->base.base.dev;
3353 uint32_t signal_levels, mask;
3354 uint8_t train_set = intel_dp->train_set[0];
3355
5a9d1f1a 3356 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3357 signal_levels = intel_hsw_signal_levels(train_set);
3358 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3359 } else if (IS_CHERRYVIEW(dev)) {
3360 signal_levels = intel_chv_signal_levels(intel_dp);
3361 mask = 0;
e2fa6fba
P
3362 } else if (IS_VALLEYVIEW(dev)) {
3363 signal_levels = intel_vlv_signal_levels(intel_dp);
3364 mask = 0;
bc7d38a4 3365 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3366 signal_levels = intel_gen7_edp_signal_levels(train_set);
3367 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3368 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3369 signal_levels = intel_gen6_edp_signal_levels(train_set);
3370 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3371 } else {
3372 signal_levels = intel_gen4_signal_levels(train_set);
3373 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3374 }
3375
3376 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3377
3378 *DP = (*DP & ~mask) | signal_levels;
3379}
3380
a4fc5ed6 3381static bool
ea5b213a 3382intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3383 uint32_t *DP,
58e10eb9 3384 uint8_t dp_train_pat)
a4fc5ed6 3385{
174edf1f
PZ
3386 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3387 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3388 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3389 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3390 int ret, len;
a4fc5ed6 3391
7b13b58a 3392 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3393
70aff66c 3394 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3395 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3396
2cdfe6c8
JN
3397 buf[0] = dp_train_pat;
3398 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3399 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3400 /* don't write DP_TRAINING_LANEx_SET on disable */
3401 len = 1;
3402 } else {
3403 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3404 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3405 len = intel_dp->lane_count + 1;
47ea7542 3406 }
a4fc5ed6 3407
9d1a1031
JN
3408 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3409 buf, len);
2cdfe6c8
JN
3410
3411 return ret == len;
a4fc5ed6
KP
3412}
3413
70aff66c
JN
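/*
 * Note: intel_dp_reset_link_train() below clears train_set[] back to the
 * lowest swing/pre-emphasis before (re)starting a training pattern, while
 * intel_dp_update_link_train() applies the levels the sink requested in its
 * adjust-request fields and writes them out via DP_TRAINING_LANE0_SET.
 */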
3414static bool
3415intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3416 uint8_t dp_train_pat)
3417{
953d22e8 3418 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3419 intel_dp_set_signal_levels(intel_dp, DP);
3420 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3421}
3422
3423static bool
3424intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3425 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3426{
3427 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3428 struct drm_device *dev = intel_dig_port->base.base.dev;
3429 struct drm_i915_private *dev_priv = dev->dev_private;
3430 int ret;
3431
3432 intel_get_adjust_train(intel_dp, link_status);
3433 intel_dp_set_signal_levels(intel_dp, DP);
3434
3435 I915_WRITE(intel_dp->output_reg, *DP);
3436 POSTING_READ(intel_dp->output_reg);
3437
9d1a1031
JN
3438 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3439 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3440
3441 return ret == intel_dp->lane_count;
3442}
3443
3ab9c637
ID
3444static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3445{
3446 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3447 struct drm_device *dev = intel_dig_port->base.base.dev;
3448 struct drm_i915_private *dev_priv = dev->dev_private;
3449 enum port port = intel_dig_port->port;
3450 uint32_t val;
3451
3452 if (!HAS_DDI(dev))
3453 return;
3454
3455 val = I915_READ(DP_TP_CTL(port));
3456 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3457 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3458 I915_WRITE(DP_TP_CTL(port), val);
3459
3460 /*
3461 * On PORT_A we can have only eDP in SST mode. There the only reason
3462 * we need to set idle transmission mode is to work around a HW issue
3463 * where we enable the pipe while not in idle link-training mode.
3464 * In this case there is a requirement to wait for a minimum number of
3465 * idle patterns to be sent.
3466 */
3467 if (port == PORT_A)
3468 return;
3469
3470 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3471 1))
3472 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3473}
3474
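/*
 * Note on the clock recovery loop below: training pattern 1 is transmitted
 * with scrambling disabled and the sink's adjust requests are applied until
 * drm_dp_clock_recovery_ok() passes. The loop restarts from minimum levels
 * once every lane reports maximum swing (at most five full restarts), and
 * gives up if the same voltage swing is requested five times in a row.
 */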
33a34e4e 3475/* Enable corresponding port and start training pattern 1 */
c19b0669 3476void
33a34e4e 3477intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3478{
da63a9f2 3479 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3480 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3481 int i;
3482 uint8_t voltage;
cdb0e95b 3483 int voltage_tries, loop_tries;
ea5b213a 3484 uint32_t DP = intel_dp->DP;
6aba5b6c 3485 uint8_t link_config[2];
a4fc5ed6 3486
affa9354 3487 if (HAS_DDI(dev))
c19b0669
PZ
3488 intel_ddi_prepare_link_retrain(encoder);
3489
3cf2efb1 3490 /* Write the link configuration data */
6aba5b6c
JN
3491 link_config[0] = intel_dp->link_bw;
3492 link_config[1] = intel_dp->lane_count;
3493 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3494 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3495 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
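	/*
	 * Note: sinks that advertise DP_SUPPORTED_LINK_RATES (parsed into
	 * num_supported_rates by intel_dp_get_dpcd()) are additionally
	 * programmed through DP_LINK_RATE_SET with the previously chosen
	 * rate_select index, on top of the legacy DP_LINK_BW_SET write above.
	 */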
bc27b7d3 3496 if (intel_dp->num_supported_rates)
a8f3ef61
SJ
3497 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3498 &intel_dp->rate_select, 1);
6aba5b6c
JN
3499
3500 link_config[0] = 0;
3501 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3502 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3503
3504 DP |= DP_PORT_EN;
1a2eb460 3505
70aff66c
JN
3506 /* clock recovery */
3507 if (!intel_dp_reset_link_train(intel_dp, &DP,
3508 DP_TRAINING_PATTERN_1 |
3509 DP_LINK_SCRAMBLING_DISABLE)) {
3510 DRM_ERROR("failed to enable link training\n");
3511 return;
3512 }
3513
a4fc5ed6 3514 voltage = 0xff;
cdb0e95b
KP
3515 voltage_tries = 0;
3516 loop_tries = 0;
a4fc5ed6 3517 for (;;) {
70aff66c 3518 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3519
a7c9655f 3520 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3521 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3522 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3523 break;
93f62dad 3524 }
a4fc5ed6 3525
01916270 3526 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3527 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3528 break;
3529 }
3530
3531 /* Check to see if we've tried the max voltage */
3532 for (i = 0; i < intel_dp->lane_count; i++)
3533 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3534 break;
3b4f819d 3535 if (i == intel_dp->lane_count) {
b06fbda3
DV
3536 ++loop_tries;
3537 if (loop_tries == 5) {
3def84b3 3538 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3539 break;
3540 }
70aff66c
JN
3541 intel_dp_reset_link_train(intel_dp, &DP,
3542 DP_TRAINING_PATTERN_1 |
3543 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3544 voltage_tries = 0;
3545 continue;
3546 }
a4fc5ed6 3547
3cf2efb1 3548 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3549 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3550 ++voltage_tries;
b06fbda3 3551 if (voltage_tries == 5) {
3def84b3 3552 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3553 break;
3554 }
3555 } else
3556 voltage_tries = 0;
3557 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3558
70aff66c
JN
3559 /* Update training set as requested by target */
3560 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3561 DRM_ERROR("failed to update link training\n");
3562 break;
3563 }
a4fc5ed6
KP
3564 }
3565
33a34e4e
JB
3566 intel_dp->DP = DP;
3567}
3568
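/*
 * Note on channel equalization below: training pattern 2 is used by default,
 * or pattern 3 for HBR2 links and sinks that advertise TPS3 support. Losing
 * clock recovery or failing to converge within five tries triggers a full
 * retrain, and the whole sequence is aborted after too many clock-recovery
 * retries.
 */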
c19b0669 3569void
33a34e4e
JB
3570intel_dp_complete_link_train(struct intel_dp *intel_dp)
3571{
33a34e4e 3572 bool channel_eq = false;
37f80975 3573 int tries, cr_tries;
33a34e4e 3574 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3575 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3576
3577 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3578 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3579 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3580
a4fc5ed6 3581 /* channel equalization */
70aff66c 3582 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3583 training_pattern |
70aff66c
JN
3584 DP_LINK_SCRAMBLING_DISABLE)) {
3585 DRM_ERROR("failed to start channel equalization\n");
3586 return;
3587 }
3588
a4fc5ed6 3589 tries = 0;
37f80975 3590 cr_tries = 0;
a4fc5ed6
KP
3591 channel_eq = false;
3592 for (;;) {
70aff66c 3593 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3594
37f80975
JB
3595 if (cr_tries > 5) {
3596 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3597 break;
3598 }
3599
a7c9655f 3600 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3601 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3602 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3603 break;
70aff66c 3604 }
a4fc5ed6 3605
37f80975 3606 /* Make sure clock is still ok */
01916270 3607 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3608 intel_dp_start_link_train(intel_dp);
70aff66c 3609 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3610 training_pattern |
70aff66c 3611 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3612 cr_tries++;
3613 continue;
3614 }
3615
1ffdff13 3616 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3617 channel_eq = true;
3618 break;
3619 }
a4fc5ed6 3620
37f80975
JB
3621 /* Try 5 times, then try clock recovery if that fails */
3622 if (tries > 5) {
37f80975 3623 intel_dp_start_link_train(intel_dp);
70aff66c 3624 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3625 training_pattern |
70aff66c 3626 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3627 tries = 0;
3628 cr_tries++;
3629 continue;
3630 }
a4fc5ed6 3631
70aff66c
JN
3632 /* Update training set as requested by target */
3633 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3634 DRM_ERROR("failed to update link training\n");
3635 break;
3636 }
3cf2efb1 3637 ++tries;
869184a6 3638 }
3cf2efb1 3639
3ab9c637
ID
3640 intel_dp_set_idle_link_train(intel_dp);
3641
3642 intel_dp->DP = DP;
3643
d6c0d722 3644 if (channel_eq)
07f42258 3645 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3646
3ab9c637
ID
3647}
3648
3649void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3650{
70aff66c 3651 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3652 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3653}
3654
3655static void
ea5b213a 3656intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3657{
da63a9f2 3658 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3659 enum port port = intel_dig_port->port;
da63a9f2 3660 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3661 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3662 uint32_t DP = intel_dp->DP;
a4fc5ed6 3663
bc76e320 3664 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3665 return;
3666
0c33d8d7 3667 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3668 return;
3669
28c97730 3670 DRM_DEBUG_KMS("\n");
32f9d658 3671
bc7d38a4 3672 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3673 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3674 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3675 } else {
aad3d14d
VS
3676 if (IS_CHERRYVIEW(dev))
3677 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3678 else
3679 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3680 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3681 }
fe255d00 3682 POSTING_READ(intel_dp->output_reg);
5eb08b69 3683
493a7081 3684 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3685 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3686 /* Hardware workaround: leaving our transcoder select
3687 * set to transcoder B while it's off will prevent the
3688 * corresponding HDMI output on transcoder A.
3689 *
3690 * Combine this with another hardware workaround:
3691 * transcoder select bit can only be cleared while the
3692 * port is enabled.
3693 */
3694 DP &= ~DP_PIPEB_SELECT;
3695 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3696 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3697 }
3698
832afda6 3699 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3700 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3701 POSTING_READ(intel_dp->output_reg);
f01eca2e 3702 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3703}
3704
26d61aad
KP
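/*
 * Note: intel_dp_get_dpcd() below caches the sink's base receiver
 * capabilities and, on eDP, its PSR capability; it also records TPS3
 * support, parses the eDP 1.4 DP_SUPPORTED_LINK_RATES table when present,
 * and fetches per-port downstream info for branch devices.
 */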
3705static bool
3706intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3707{
a031d709
RV
3708 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3709 struct drm_device *dev = dig_port->base.base.dev;
3710 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3711 uint8_t rev;
a031d709 3712
9d1a1031
JN
3713 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3714 sizeof(intel_dp->dpcd)) < 0)
edb39244 3715 return false; /* aux transfer failed */
92fd8fd1 3716
a8e98153 3717 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3718
edb39244
AJ
3719 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3720 return false; /* DPCD not present */
3721
2293bb5c
SK
3722 /* Check if the panel supports PSR */
3723 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3724 if (is_edp(intel_dp)) {
9d1a1031
JN
3725 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3726 intel_dp->psr_dpcd,
3727 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3728 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3729 dev_priv->psr.sink_support = true;
50003939 3730 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3731 }
50003939
JN
3732 }
3733
7809a611 3734 /* Training Pattern 3 support, both source and sink */
06ea66b6 3735 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3736 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3737 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3738 intel_dp->use_tps3 = true;
f8d8a672 3739 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3740 } else
3741 intel_dp->use_tps3 = false;
3742
fc0f8e25
SJ
3743 /* Intermediate frequency support */
3744 if (is_edp(intel_dp) &&
3745 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3746 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3747 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3748 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3749 int i;
3750
fc0f8e25
SJ
3751 intel_dp_dpcd_read_wake(&intel_dp->aux,
3752 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3753 supported_rates,
3754 sizeof(supported_rates));
3755
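		/*
		 * Note: each DP_SUPPORTED_LINK_RATES entry is a little-endian
		 * 16-bit value in units of 200 kHz, so e.g. 810 becomes
		 * 162000 kHz (1.62 GHz) after the "* 200" below; a zero entry
		 * terminates the table.
		 */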
3756 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3757 int val = le16_to_cpu(supported_rates[i]);
3758
3759 if (val == 0)
3760 break;
3761
3762 intel_dp->supported_rates[i] = val * 200;
3763 }
3764 intel_dp->num_supported_rates = i;
fc0f8e25 3765 }
edb39244
AJ
3766 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3767 DP_DWN_STRM_PORT_PRESENT))
3768 return true; /* native DP sink */
3769
3770 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3771 return true; /* no per-port downstream info */
3772
9d1a1031
JN
3773 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3774 intel_dp->downstream_ports,
3775 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3776 return false; /* downstream port status fetch failed */
3777
3778 return true;
92fd8fd1
KP
3779}
3780
0d198328
AJ
3781static void
3782intel_dp_probe_oui(struct intel_dp *intel_dp)
3783{
3784 u8 buf[3];
3785
3786 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3787 return;
3788
9d1a1031 3789 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3790 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3791 buf[0], buf[1], buf[2]);
3792
9d1a1031 3793 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3794 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3795 buf[0], buf[1], buf[2]);
3796}
3797
0e32b39c
DA
3798static bool
3799intel_dp_probe_mst(struct intel_dp *intel_dp)
3800{
3801 u8 buf[1];
3802
3803 if (!intel_dp->can_mst)
3804 return false;
3805
3806 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3807 return false;
3808
0e32b39c
DA
3809 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3810 if (buf[0] & DP_MST_CAP) {
3811 DRM_DEBUG_KMS("Sink is MST capable\n");
3812 intel_dp->is_mst = true;
3813 } else {
3814 DRM_DEBUG_KMS("Sink is not MST capable\n");
3815 intel_dp->is_mst = false;
3816 }
3817 }
0e32b39c
DA
3818
3819 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3820 return intel_dp->is_mst;
3821}
3822
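/*
 * Note on the sink CRC flow below: the sink must report DP_TEST_CRC_SUPPORTED
 * in DP_TEST_SINK_MISC; CRC generation is then started via DP_TEST_SINK, the
 * code waits up to six vblanks for the test count to advance, reads six CRC
 * bytes starting at DP_TEST_CRC_R_CR, and finally stops the test.
 */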
d2e216d0
RV
3823int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3824{
3825 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3826 struct drm_device *dev = intel_dig_port->base.base.dev;
3827 struct intel_crtc *intel_crtc =
3828 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3829 u8 buf;
3830 int test_crc_count;
3831 int attempts = 6;
d2e216d0 3832
ad9dc91b 3833 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3834 return -EIO;
d2e216d0 3835
ad9dc91b 3836 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3837 return -ENOTTY;
3838
1dda5f93
RV
3839 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3840 return -EIO;
3841
9d1a1031 3842 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3843 buf | DP_TEST_SINK_START) < 0)
bda0381e 3844 return -EIO;
d2e216d0 3845
1dda5f93 3846 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3847 return -EIO;
ad9dc91b 3848 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3849
ad9dc91b 3850 do {
1dda5f93
RV
3851 if (drm_dp_dpcd_readb(&intel_dp->aux,
3852 DP_TEST_SINK_MISC, &buf) < 0)
3853 return -EIO;
ad9dc91b
RV
3854 intel_wait_for_vblank(dev, intel_crtc->pipe);
3855 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3856
3857 if (attempts == 0) {
90bd1f46
DV
3858 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3859 return -ETIMEDOUT;
ad9dc91b 3860 }
d2e216d0 3861
9d1a1031 3862 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3863 return -EIO;
d2e216d0 3864
1dda5f93
RV
3865 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3866 return -EIO;
3867 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3868 buf & ~DP_TEST_SINK_START) < 0)
3869 return -EIO;
ce31d9f4 3870
d2e216d0
RV
3871 return 0;
3872}
3873
a60f0e38
JB
3874static bool
3875intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3876{
9d1a1031
JN
3877 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3878 DP_DEVICE_SERVICE_IRQ_VECTOR,
3879 sink_irq_vector, 1) == 1;
a60f0e38
JB
3880}
3881
0e32b39c
DA
3882static bool
3883intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3884{
3885 int ret;
3886
3887 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3888 DP_SINK_COUNT_ESI,
3889 sink_irq_vector, 14);
3890 if (ret != 14)
3891 return false;
3892
3893 return true;
3894}
3895
a60f0e38
JB
3896static void
3897intel_dp_handle_test_request(struct intel_dp *intel_dp)
3898{
3899 /* NAK by default */
9d1a1031 3900 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3901}
3902
0e32b39c
DA
3903static int
3904intel_dp_check_mst_status(struct intel_dp *intel_dp)
3905{
3906 bool bret;
3907
3908 if (intel_dp->is_mst) {
3909 u8 esi[16] = { 0 };
3910 int ret = 0;
3911 int retry;
3912 bool handled;
3913 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3914go_again:
3915 if (bret == true) {
3916
3917 /* check link status - esi[10] = 0x200c */
3918 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3919 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3920 intel_dp_start_link_train(intel_dp);
3921 intel_dp_complete_link_train(intel_dp);
3922 intel_dp_stop_link_train(intel_dp);
3923 }
3924
6f34cc39 3925 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3926 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3927
3928 if (handled) {
3929 for (retry = 0; retry < 3; retry++) {
3930 int wret;
3931 wret = drm_dp_dpcd_write(&intel_dp->aux,
3932 DP_SINK_COUNT_ESI+1,
3933 &esi[1], 3);
3934 if (wret == 3) {
3935 break;
3936 }
3937 }
3938
3939 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3940 if (bret == true) {
6f34cc39 3941 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3942 goto go_again;
3943 }
3944 } else
3945 ret = 0;
3946
3947 return ret;
3948 } else {
3949 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3950 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3951 intel_dp->is_mst = false;
3952 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3953 /* send a hotplug event */
3954 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3955 }
3956 }
3957 return -EINVAL;
3958}
3959
a4fc5ed6
KP
3960/*
3961 * According to DP spec
3962 * 5.1.2:
3963 * 1. Read DPCD
3964 * 2. Configure link according to Receiver Capabilities
3965 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3966 * 4. Check link status on receipt of hot-plug interrupt
3967 */
a5146200 3968static void
ea5b213a 3969intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3970{
5b215bcf 3971 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3972 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3973 u8 sink_irq_vector;
93f62dad 3974 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3975
5b215bcf
DA
3976 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3977
da63a9f2 3978 if (!intel_encoder->connectors_active)
d2b996ac 3979 return;
59cd09e1 3980
da63a9f2 3981 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3982 return;
3983
1a125d8a
ID
3984 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3985 return;
3986
92fd8fd1 3987 /* Try to read receiver status if the link appears to be up */
93f62dad 3988 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3989 return;
3990 }
3991
92fd8fd1 3992 /* Now read the DPCD to see if it's actually running */
26d61aad 3993 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3994 return;
3995 }
3996
a60f0e38
JB
3997 /* Try to read the source of the interrupt */
3998 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3999 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4000 /* Clear interrupt source */
9d1a1031
JN
4001 drm_dp_dpcd_writeb(&intel_dp->aux,
4002 DP_DEVICE_SERVICE_IRQ_VECTOR,
4003 sink_irq_vector);
a60f0e38
JB
4004
4005 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4006 intel_dp_handle_test_request(intel_dp);
4007 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4008 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4009 }
4010
1ffdff13 4011 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4012 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4013 intel_encoder->base.name);
33a34e4e
JB
4014 intel_dp_start_link_train(intel_dp);
4015 intel_dp_complete_link_train(intel_dp);
3ab9c637 4016 intel_dp_stop_link_train(intel_dp);
33a34e4e 4017 }
a4fc5ed6 4018}
a4fc5ed6 4019
caf9ab24 4020/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4021static enum drm_connector_status
26d61aad 4022intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4023{
caf9ab24 4024 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4025 uint8_t type;
4026
4027 if (!intel_dp_get_dpcd(intel_dp))
4028 return connector_status_disconnected;
4029
4030 /* if there's no downstream port, we're done */
4031 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4032 return connector_status_connected;
caf9ab24
AJ
4033
4034 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4035 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4036 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4037 uint8_t reg;
9d1a1031
JN
4038
4039 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4040 &reg, 1) < 0)
caf9ab24 4041 return connector_status_unknown;
9d1a1031 4042
23235177
AJ
4043 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4044 : connector_status_disconnected;
caf9ab24
AJ
4045 }
4046
4047 /* If no HPD, poke DDC gently */
0b99836f 4048 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4049 return connector_status_connected;
caf9ab24
AJ
4050
4051 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4052 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4053 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4054 if (type == DP_DS_PORT_TYPE_VGA ||
4055 type == DP_DS_PORT_TYPE_NON_EDID)
4056 return connector_status_unknown;
4057 } else {
4058 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4059 DP_DWN_STRM_PORT_TYPE_MASK;
4060 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4061 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4062 return connector_status_unknown;
4063 }
caf9ab24
AJ
4064
4065 /* Anything else is out of spec, warn and ignore */
4066 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4067 return connector_status_disconnected;
71ba9000
AJ
4068}
4069
d410b56d
CW
4070static enum drm_connector_status
4071edp_detect(struct intel_dp *intel_dp)
4072{
4073 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4074 enum drm_connector_status status;
4075
4076 status = intel_panel_detect(dev);
4077 if (status == connector_status_unknown)
4078 status = connector_status_connected;
4079
4080 return status;
4081}
4082
5eb08b69 4083static enum drm_connector_status
a9756bb5 4084ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4085{
30add22d 4086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4087 struct drm_i915_private *dev_priv = dev->dev_private;
4088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4089
1b469639
DL
4090 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4091 return connector_status_disconnected;
4092
26d61aad 4093 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4094}
4095
2a592bec
DA
4096static int g4x_digital_port_connected(struct drm_device *dev,
4097 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4098{
a4fc5ed6 4099 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4100 uint32_t bit;
5eb08b69 4101
232a6ee9
TP
4102 if (IS_VALLEYVIEW(dev)) {
4103 switch (intel_dig_port->port) {
4104 case PORT_B:
4105 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4106 break;
4107 case PORT_C:
4108 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4109 break;
4110 case PORT_D:
4111 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4112 break;
4113 default:
2a592bec 4114 return -EINVAL;
232a6ee9
TP
4115 }
4116 } else {
4117 switch (intel_dig_port->port) {
4118 case PORT_B:
4119 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4120 break;
4121 case PORT_C:
4122 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4123 break;
4124 case PORT_D:
4125 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4126 break;
4127 default:
2a592bec 4128 return -EINVAL;
232a6ee9 4129 }
a4fc5ed6
KP
4130 }
4131
10f76a38 4132 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4133 return 0;
4134 return 1;
4135}
4136
4137static enum drm_connector_status
4138g4x_dp_detect(struct intel_dp *intel_dp)
4139{
4140 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4142 int ret;
4143
4144 /* Can't disconnect eDP, but you can close the lid... */
4145 if (is_edp(intel_dp)) {
4146 enum drm_connector_status status;
4147
4148 status = intel_panel_detect(dev);
4149 if (status == connector_status_unknown)
4150 status = connector_status_connected;
4151 return status;
4152 }
4153
4154 ret = g4x_digital_port_connected(dev, intel_dig_port);
4155 if (ret == -EINVAL)
4156 return connector_status_unknown;
4157 else if (ret == 0)
a4fc5ed6
KP
4158 return connector_status_disconnected;
4159
26d61aad 4160 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4161}
4162
8c241fef 4163static struct edid *
beb60608 4164intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4165{
beb60608 4166 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4167
9cd300e0
JN
4168 /* use cached edid if we have one */
4169 if (intel_connector->edid) {
9cd300e0
JN
4170 /* invalid edid */
4171 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4172 return NULL;
4173
55e9edeb 4174 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4175 } else
4176 return drm_get_edid(&intel_connector->base,
4177 &intel_dp->aux.ddc);
4178}
8c241fef 4179
beb60608
CW
4180static void
4181intel_dp_set_edid(struct intel_dp *intel_dp)
4182{
4183 struct intel_connector *intel_connector = intel_dp->attached_connector;
4184 struct edid *edid;
8c241fef 4185
beb60608
CW
4186 edid = intel_dp_get_edid(intel_dp);
4187 intel_connector->detect_edid = edid;
4188
4189 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4190 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4191 else
4192 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4193}
4194
beb60608
CW
4195static void
4196intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4197{
beb60608 4198 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4199
beb60608
CW
4200 kfree(intel_connector->detect_edid);
4201 intel_connector->detect_edid = NULL;
9cd300e0 4202
beb60608
CW
4203 intel_dp->has_audio = false;
4204}
d6f24d0f 4205
beb60608
CW
4206static enum intel_display_power_domain
4207intel_dp_power_get(struct intel_dp *dp)
4208{
4209 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4210 enum intel_display_power_domain power_domain;
4211
4212 power_domain = intel_display_port_power_domain(encoder);
4213 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4214
4215 return power_domain;
4216}
d6f24d0f 4217
beb60608
CW
4218static void
4219intel_dp_power_put(struct intel_dp *dp,
4220 enum intel_display_power_domain power_domain)
4221{
4222 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4223 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4224}
4225
a9756bb5
ZW
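/*
 * Note on the detect flow below: any cached EDID is dropped first and a power
 * domain reference is taken, then platform-specific hotplug/DPCD probing
 * runs; on success the sink OUI is logged, MST is probed (MST sinks report
 * disconnected here), and a fresh EDID is cached for later get_modes calls.
 */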
4226static enum drm_connector_status
4227intel_dp_detect(struct drm_connector *connector, bool force)
4228{
4229 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4230 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4231 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4232 struct drm_device *dev = connector->dev;
a9756bb5 4233 enum drm_connector_status status;
671dedd2 4234 enum intel_display_power_domain power_domain;
0e32b39c 4235 bool ret;
a9756bb5 4236
164c8598 4237 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4238 connector->base.id, connector->name);
beb60608 4239 intel_dp_unset_edid(intel_dp);
164c8598 4240
0e32b39c
DA
4241 if (intel_dp->is_mst) {
4242 /* MST devices are disconnected from a monitor POV */
4243 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4244 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4245 return connector_status_disconnected;
0e32b39c
DA
4246 }
4247
beb60608 4248 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4249
d410b56d
CW
4250 /* Can't disconnect eDP, but you can close the lid... */
4251 if (is_edp(intel_dp))
4252 status = edp_detect(intel_dp);
4253 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4254 status = ironlake_dp_detect(intel_dp);
4255 else
4256 status = g4x_dp_detect(intel_dp);
4257 if (status != connector_status_connected)
c8c8fb33 4258 goto out;
a9756bb5 4259
0d198328
AJ
4260 intel_dp_probe_oui(intel_dp);
4261
0e32b39c
DA
4262 ret = intel_dp_probe_mst(intel_dp);
4263 if (ret) {
4264 /* if we are in MST mode then this connector
4265 * won't appear connected or have anything with EDID on it */
4266 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4267 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4268 status = connector_status_disconnected;
4269 goto out;
4270 }
4271
beb60608 4272 intel_dp_set_edid(intel_dp);
a9756bb5 4273
d63885da
PZ
4274 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4275 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4276 status = connector_status_connected;
4277
4278out:
beb60608 4279 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4280 return status;
a4fc5ed6
KP
4281}
4282
beb60608
CW
4283static void
4284intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4285{
df0e9248 4286 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4287 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4288 enum intel_display_power_domain power_domain;
a4fc5ed6 4289
beb60608
CW
4290 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4291 connector->base.id, connector->name);
4292 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4293
beb60608
CW
4294 if (connector->status != connector_status_connected)
4295 return;
671dedd2 4296
beb60608
CW
4297 power_domain = intel_dp_power_get(intel_dp);
4298
4299 intel_dp_set_edid(intel_dp);
4300
4301 intel_dp_power_put(intel_dp, power_domain);
4302
4303 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4304 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4305}
4306
4307static int intel_dp_get_modes(struct drm_connector *connector)
4308{
4309 struct intel_connector *intel_connector = to_intel_connector(connector);
4310 struct edid *edid;
4311
4312 edid = intel_connector->detect_edid;
4313 if (edid) {
4314 int ret = intel_connector_update_modes(connector, edid);
4315 if (ret)
4316 return ret;
4317 }
32f9d658 4318
f8779fda 4319 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4320 if (is_edp(intel_attached_dp(connector)) &&
4321 intel_connector->panel.fixed_mode) {
f8779fda 4322 struct drm_display_mode *mode;
beb60608
CW
4323
4324 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4325 intel_connector->panel.fixed_mode);
f8779fda 4326 if (mode) {
32f9d658
ZW
4327 drm_mode_probed_add(connector, mode);
4328 return 1;
4329 }
4330 }
beb60608 4331
32f9d658 4332 return 0;
a4fc5ed6
KP
4333}
4334
1aad7ac0
CW
4335static bool
4336intel_dp_detect_audio(struct drm_connector *connector)
4337{
1aad7ac0 4338 bool has_audio = false;
beb60608 4339 struct edid *edid;
1aad7ac0 4340
beb60608
CW
4341 edid = to_intel_connector(connector)->detect_edid;
4342 if (edid)
1aad7ac0 4343 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4344
1aad7ac0
CW
4345 return has_audio;
4346}
4347
f684960e
CW
4348static int
4349intel_dp_set_property(struct drm_connector *connector,
4350 struct drm_property *property,
4351 uint64_t val)
4352{
e953fd7b 4353 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4354 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4355 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4356 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4357 int ret;
4358
662595df 4359 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4360 if (ret)
4361 return ret;
4362
3f43c48d 4363 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4364 int i = val;
4365 bool has_audio;
4366
4367 if (i == intel_dp->force_audio)
f684960e
CW
4368 return 0;
4369
1aad7ac0 4370 intel_dp->force_audio = i;
f684960e 4371
c3e5f67b 4372 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4373 has_audio = intel_dp_detect_audio(connector);
4374 else
c3e5f67b 4375 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4376
4377 if (has_audio == intel_dp->has_audio)
f684960e
CW
4378 return 0;
4379
1aad7ac0 4380 intel_dp->has_audio = has_audio;
f684960e
CW
4381 goto done;
4382 }
4383
e953fd7b 4384 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4385 bool old_auto = intel_dp->color_range_auto;
4386 uint32_t old_range = intel_dp->color_range;
4387
55bc60db
VS
4388 switch (val) {
4389 case INTEL_BROADCAST_RGB_AUTO:
4390 intel_dp->color_range_auto = true;
4391 break;
4392 case INTEL_BROADCAST_RGB_FULL:
4393 intel_dp->color_range_auto = false;
4394 intel_dp->color_range = 0;
4395 break;
4396 case INTEL_BROADCAST_RGB_LIMITED:
4397 intel_dp->color_range_auto = false;
4398 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4399 break;
4400 default:
4401 return -EINVAL;
4402 }
ae4edb80
DV
4403
4404 if (old_auto == intel_dp->color_range_auto &&
4405 old_range == intel_dp->color_range)
4406 return 0;
4407
e953fd7b
CW
4408 goto done;
4409 }
4410
53b41837
YN
4411 if (is_edp(intel_dp) &&
4412 property == connector->dev->mode_config.scaling_mode_property) {
4413 if (val == DRM_MODE_SCALE_NONE) {
4414 DRM_DEBUG_KMS("no scaling not supported\n");
4415 return -EINVAL;
4416 }
4417
4418 if (intel_connector->panel.fitting_mode == val) {
4419 /* the eDP scaling property is not changed */
4420 return 0;
4421 }
4422 intel_connector->panel.fitting_mode = val;
4423
4424 goto done;
4425 }
4426
f684960e
CW
4427 return -EINVAL;
4428
4429done:
c0c36b94
CW
4430 if (intel_encoder->base.crtc)
4431 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4432
4433 return 0;
4434}
4435
a4fc5ed6 4436static void
73845adf 4437intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4438{
1d508706 4439 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4440
10e972d3 4441 kfree(intel_connector->detect_edid);
beb60608 4442
9cd300e0
JN
4443 if (!IS_ERR_OR_NULL(intel_connector->edid))
4444 kfree(intel_connector->edid);
4445
acd8db10
PZ
4446 /* Can't call is_edp() since the encoder may have been destroyed
4447 * already. */
4448 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4449 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4450
a4fc5ed6 4451 drm_connector_cleanup(connector);
55f78c43 4452 kfree(connector);
a4fc5ed6
KP
4453}
4454
00c09d70 4455void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4456{
da63a9f2
PZ
4457 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4458 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4459
4f71d0cb 4460 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4461 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4462 if (is_edp(intel_dp)) {
4463 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4464 /*
4465 * vdd might still be enabled due to the delayed vdd off.
4466 * Make sure vdd is actually turned off here.
4467 */
773538e8 4468 pps_lock(intel_dp);
4be73780 4469 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4470 pps_unlock(intel_dp);
4471
01527b31
CT
4472 if (intel_dp->edp_notifier.notifier_call) {
4473 unregister_reboot_notifier(&intel_dp->edp_notifier);
4474 intel_dp->edp_notifier.notifier_call = NULL;
4475 }
bd943159 4476 }
c8bd0e49 4477 drm_encoder_cleanup(encoder);
da63a9f2 4478 kfree(intel_dig_port);
24d05927
DV
4479}
4480
07f9cd0b
ID
4481static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4482{
4483 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4484
4485 if (!is_edp(intel_dp))
4486 return;
4487
951468f3
VS
4488 /*
4489 * vdd might still be enabled due to the delayed vdd off.
4490 * Make sure vdd is actually turned off here.
4491 */
afa4e53a 4492 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4493 pps_lock(intel_dp);
07f9cd0b 4494 edp_panel_vdd_off_sync(intel_dp);
773538e8 4495 pps_unlock(intel_dp);
07f9cd0b
ID
4496}
4497
49e6bc51
VS
4498static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4499{
4500 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4501 struct drm_device *dev = intel_dig_port->base.base.dev;
4502 struct drm_i915_private *dev_priv = dev->dev_private;
4503 enum intel_display_power_domain power_domain;
4504
4505 lockdep_assert_held(&dev_priv->pps_mutex);
4506
4507 if (!edp_have_panel_vdd(intel_dp))
4508 return;
4509
4510 /*
4511 * The VDD bit needs a power domain reference, so if the bit is
4512 * already enabled when we boot or resume, grab this reference and
4513 * schedule a vdd off, so we don't hold on to the reference
4514 * indefinitely.
4515 */
4516 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4517 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4518 intel_display_power_get(dev_priv, power_domain);
4519
4520 edp_panel_vdd_schedule_off(intel_dp);
4521}
4522
6d93c0c4
ID
4523static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4524{
49e6bc51
VS
4525 struct intel_dp *intel_dp;
4526
4527 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4528 return;
4529
4530 intel_dp = enc_to_intel_dp(encoder);
4531
4532 pps_lock(intel_dp);
4533
4534 /*
4535 * Read out the current power sequencer assignment,
4536 * in case the BIOS did something with it.
4537 */
4538 if (IS_VALLEYVIEW(encoder->dev))
4539 vlv_initial_power_sequencer_setup(intel_dp);
4540
4541 intel_edp_panel_vdd_sanitize(intel_dp);
4542
4543 pps_unlock(intel_dp);
6d93c0c4
ID
4544}
4545
a4fc5ed6 4546static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4547 .dpms = intel_connector_dpms,
a4fc5ed6 4548 .detect = intel_dp_detect,
beb60608 4549 .force = intel_dp_force,
a4fc5ed6 4550 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4551 .set_property = intel_dp_set_property,
2545e4a6 4552 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4553 .destroy = intel_dp_connector_destroy,
c6f95f27 4554 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4555};
4556
4557static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4558 .get_modes = intel_dp_get_modes,
4559 .mode_valid = intel_dp_mode_valid,
df0e9248 4560 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4561};
4562
a4fc5ed6 4563static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4564 .reset = intel_dp_encoder_reset,
24d05927 4565 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4566};
4567
0e32b39c 4568void
21d40d37 4569intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4570{
0e32b39c 4571 return;
c8110e52 4572}
6207937d 4573
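/*
 * Note on HPD handling below: a long pulse re-checks physical presence,
 * re-reads the DPCD and OUI, and re-probes MST; a short pulse either services
 * MST interrupts or, for SST, re-validates the link status under the
 * connection mutex. Long pulses on eDP are ignored to avoid a vdd on/off
 * feedback loop.
 */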
b2c5c181 4574enum irqreturn
13cf5504
DA
4575intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4576{
4577 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4578 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4579 struct drm_device *dev = intel_dig_port->base.base.dev;
4580 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4581 enum intel_display_power_domain power_domain;
b2c5c181 4582 enum irqreturn ret = IRQ_NONE;
1c767b33 4583
0e32b39c
DA
4584 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4585 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4586
7a7f84cc
VS
4587 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4588 /*
4589 * vdd off can generate a long pulse on eDP which
4590 * would require vdd on to handle it, and thus we
4591 * would end up in an endless cycle of
4592 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4593 */
4594 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4595 port_name(intel_dig_port->port));
a8b3d52f 4596 return IRQ_HANDLED;
7a7f84cc
VS
4597 }
4598
26fbb774
VS
4599 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4600 port_name(intel_dig_port->port),
0e32b39c 4601 long_hpd ? "long" : "short");
13cf5504 4602
1c767b33
ID
4603 power_domain = intel_display_port_power_domain(intel_encoder);
4604 intel_display_power_get(dev_priv, power_domain);
4605
0e32b39c 4606 if (long_hpd) {
2a592bec
DA
4607
4608 if (HAS_PCH_SPLIT(dev)) {
4609 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4610 goto mst_fail;
4611 } else {
4612 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4613 goto mst_fail;
4614 }
0e32b39c
DA
4615
4616 if (!intel_dp_get_dpcd(intel_dp)) {
4617 goto mst_fail;
4618 }
4619
4620 intel_dp_probe_oui(intel_dp);
4621
4622 if (!intel_dp_probe_mst(intel_dp))
4623 goto mst_fail;
4624
4625 } else {
4626 if (intel_dp->is_mst) {
1c767b33 4627 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4628 goto mst_fail;
4629 }
4630
4631 if (!intel_dp->is_mst) {
4632 /*
4633 * we'll check the link status via the normal hot plug path later -
4634 * but for short hpds we should check it now
4635 */
5b215bcf 4636 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4637 intel_dp_check_link_status(intel_dp);
5b215bcf 4638 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4639 }
4640 }
b2c5c181
DV
4641
4642 ret = IRQ_HANDLED;
4643
1c767b33 4644 goto put_power;
0e32b39c
DA
4645mst_fail:
4646 /* if we were in MST mode, and device is not there get out of MST mode */
4647 if (intel_dp->is_mst) {
4648 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4649 intel_dp->is_mst = false;
4650 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4651 }
1c767b33
ID
4652put_power:
4653 intel_display_power_put(dev_priv, power_domain);
4654
4655 return ret;
13cf5504
DA
4656}
4657
e3421a18
ZW
4658/* Return which DP Port should be selected for Transcoder DP control */
4659int
0206e353 4660intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4661{
4662 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4663 struct intel_encoder *intel_encoder;
4664 struct intel_dp *intel_dp;
e3421a18 4665
fa90ecef
PZ
4666 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4667 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4668
fa90ecef
PZ
4669 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4670 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4671 return intel_dp->output_reg;
e3421a18 4672 }
ea5b213a 4673
e3421a18
ZW
4674 return -1;
4675}
4676
36e83a18 4677/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4678bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4679{
4680 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4681 union child_device_config *p_child;
36e83a18 4682 int i;
5d8a7752
VS
4683 static const short port_mapping[] = {
4684 [PORT_B] = PORT_IDPB,
4685 [PORT_C] = PORT_IDPC,
4686 [PORT_D] = PORT_IDPD,
4687 };
36e83a18 4688
3b32a35b
VS
4689 if (port == PORT_A)
4690 return true;
4691
41aa3448 4692 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4693 return false;
4694
41aa3448
RV
4695 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4696 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4697
5d8a7752 4698 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4699 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4700 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4701 return true;
4702 }
4703 return false;
4704}
4705
0e32b39c 4706void
f684960e
CW
4707intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4708{
53b41837
YN
4709 struct intel_connector *intel_connector = to_intel_connector(connector);
4710
3f43c48d 4711 intel_attach_force_audio_property(connector);
e953fd7b 4712 intel_attach_broadcast_rgb_property(connector);
55bc60db 4713 intel_dp->color_range_auto = true;
53b41837
YN
4714
4715 if (is_edp(intel_dp)) {
4716 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4717 drm_object_attach_property(
4718 &connector->base,
53b41837 4719 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4720 DRM_MODE_SCALE_ASPECT);
4721 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4722 }
f684960e
CW
4723}
4724
dada1a9f
ID
4725static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4726{
4727 intel_dp->last_power_cycle = jiffies;
4728 intel_dp->last_power_on = jiffies;
4729 intel_dp->last_backlight_off = jiffies;
4730}
4731
67a54566
DV
4732static void
4733intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4734 struct intel_dp *intel_dp)
67a54566
DV
4735{
4736 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4737 struct edp_power_seq cur, vbt, spec,
4738 *final = &intel_dp->pps_delays;
67a54566 4739 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4740 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4741
e39b999a
VS
4742 lockdep_assert_held(&dev_priv->pps_mutex);
4743
81ddbc69
VS
4744 /* already initialized? */
4745 if (final->t11_t12 != 0)
4746 return;
4747
453c5420 4748 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4749 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4750 pp_on_reg = PCH_PP_ON_DELAYS;
4751 pp_off_reg = PCH_PP_OFF_DELAYS;
4752 pp_div_reg = PCH_PP_DIVISOR;
4753 } else {
bf13e81b
JN
4754 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4755
4756 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4757 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4758 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4759 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4760 }
67a54566
DV
4761
4762 /* Workaround: Need to write PP_CONTROL with the unlock key as
4763 * the very first thing. */
453c5420 4764 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4765 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4766
453c5420
JB
4767 pp_on = I915_READ(pp_on_reg);
4768 pp_off = I915_READ(pp_off_reg);
4769 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4770
4771 /* Pull timing values out of registers */
4772 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4773 PANEL_POWER_UP_DELAY_SHIFT;
4774
4775 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4776 PANEL_LIGHT_ON_DELAY_SHIFT;
4777
4778 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4779 PANEL_LIGHT_OFF_DELAY_SHIFT;
4780
4781 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4782 PANEL_POWER_DOWN_DELAY_SHIFT;
4783
4784 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4785 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4786
4787 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4788 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4789
41aa3448 4790 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4791
4792 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4793 * our hw here, which are all in 100usec. */
4794 spec.t1_t3 = 210 * 10;
4795 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4796 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4797 spec.t10 = 500 * 10;
4798 /* This one is special and actually in units of 100ms, but zero
4799 * based in the hw (so we need to add 100 ms). But the sw vbt
4801 * table multiplies it by 1000 to make it in units of 100usec,
4801 * too. */
4802 spec.t11_t12 = (510 + 100) * 10;
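	/*
	 * Worked example: the 510 ms spec limit plus the 100 ms hw offset
	 * is 610 ms, i.e. 6100 in the 100 usec units used for the other
	 * fields above.
	 */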
4803
4804 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4805 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4806
4807 /* Use the max of the register settings and vbt. If both are
4808 * unset, fall back to the spec limits. */
36b5f425 4809#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4810 spec.field : \
4811 max(cur.field, vbt.field))
4812 assign_final(t1_t3);
4813 assign_final(t8);
4814 assign_final(t9);
4815 assign_final(t10);
4816 assign_final(t11_t12);
4817#undef assign_final
4818
36b5f425 4819#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4820 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4821 intel_dp->backlight_on_delay = get_delay(t8);
4822 intel_dp->backlight_off_delay = get_delay(t9);
4823 intel_dp->panel_power_down_delay = get_delay(t10);
4824 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4825#undef get_delay
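	/*
	 * The delays above are now in ms: get_delay() rounds the 100 usec
	 * based values up, so e.g. a t1_t3 of 2100 (210.0 ms) becomes a
	 * 210 ms panel_power_up_delay.
	 */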
4826
f30d26e4
JN
4827 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4828 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4829 intel_dp->panel_power_cycle_delay);
4830
4831 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4832 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4833}
4834
4835static void
4836intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4837 struct intel_dp *intel_dp)
f30d26e4
JN
4838{
4839 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4840 u32 pp_on, pp_off, pp_div, port_sel = 0;
4841 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4842 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4843 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4844 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4845
e39b999a 4846 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4847
4848 if (HAS_PCH_SPLIT(dev)) {
4849 pp_on_reg = PCH_PP_ON_DELAYS;
4850 pp_off_reg = PCH_PP_OFF_DELAYS;
4851 pp_div_reg = PCH_PP_DIVISOR;
4852 } else {
bf13e81b
JN
4853 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4854
4855 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4856 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4857 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4858 }
4859
b2f19d1a
PZ
4860 /*
4861 * And finally store the new values in the power sequencer. The
4862 * backlight delays are set to 1 because we do manual waits on them. For
4863 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4864 * we'll end up waiting for the backlight off delay twice: once when we
4865 * do the manual sleep, and once when we disable the panel and wait for
4866 * the PP_STATUS bit to become zero.
4867 */
f30d26e4 4868 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4869 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4870 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4871 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4872 /* Compute the divisor for the pp clock, simply match the Bspec
4873 * formula. */
453c5420 4874 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4875 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4876 << PANEL_POWER_CYCLE_DELAY_SHIFT);
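	/* t11_t12 is kept in 100 usec units; dividing by 1000 converts it
	 * into the 100 ms units of the power cycle delay field. */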
4877
4878 /* Haswell doesn't have any port selection bits for the panel
4879 * power sequencer any more. */
bc7d38a4 4880 if (IS_VALLEYVIEW(dev)) {
ad933b56 4881 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4882 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4883 if (port == PORT_A)
a24c144c 4884 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4885 else
a24c144c 4886 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4887 }
4888
453c5420
JB
4889 pp_on |= port_sel;
4890
4891 I915_WRITE(pp_on_reg, pp_on);
4892 I915_WRITE(pp_off_reg, pp_off);
4893 I915_WRITE(pp_div_reg, pp_div);
67a54566 4894
67a54566 4895 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4896 I915_READ(pp_on_reg),
4897 I915_READ(pp_off_reg),
4898 I915_READ(pp_div_reg));
f684960e
CW
4899}
4900
b33a2815
VK
4901/**
4902 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4903 * @dev: DRM device
4904 * @refresh_rate: RR to be programmed
4905 *
 4906 * This function gets called when the refresh rate (RR) has to be changed from
 4907 * one frequency to another. The switch can be between the high and low RR
 4908 * supported by the panel, or to any other RR based on media playback (in
 4909 * that case the RR value needs to be passed in from user space).
 4910 *
 4911 * The caller of this function needs to hold dev_priv->drrs.mutex.
4912 */
96178eeb 4913static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4914{
4915 struct drm_i915_private *dev_priv = dev->dev_private;
4916 struct intel_encoder *encoder;
96178eeb
VK
4917 struct intel_digital_port *dig_port = NULL;
4918 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4919 struct intel_crtc_state *config = NULL;
439d7ac0 4920 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4921 u32 reg, val;
96178eeb 4922 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4923
4924 if (refresh_rate <= 0) {
4925 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4926 return;
4927 }
4928
96178eeb
VK
4929 if (intel_dp == NULL) {
4930 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4931 return;
4932 }
4933
1fcc9d1c 4934 /*
e4d59f6b
RV
4935 * FIXME: This needs proper synchronization with psr state for some
4936 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4937 */
439d7ac0 4938
96178eeb
VK
4939 dig_port = dp_to_dig_port(intel_dp);
4940 encoder = &dig_port->base;
439d7ac0
PB
4941 intel_crtc = encoder->new_crtc;
4942
4943 if (!intel_crtc) {
4944 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4945 return;
4946 }
4947
6e3c9717 4948 config = intel_crtc->config;
439d7ac0 4949
96178eeb 4950 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4951 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4952 return;
4953 }
4954
96178eeb
VK
4955 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4956 refresh_rate)
439d7ac0
PB
4957 index = DRRS_LOW_RR;
4958
96178eeb 4959 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4960 DRM_DEBUG_KMS(
4961 "DRRS requested for previously set RR...ignoring\n");
4962 return;
4963 }
4964
4965 if (!intel_crtc->active) {
4966 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4967 return;
4968 }
4969
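	/*
	 * Two mechanisms implement the actual switch: gen8+ (other than CHV)
	 * picks between the precomputed M1_N1/M2_N2 link values, while the
	 * remaining gen7+ platforms toggle the EDP_RR_MODE_SWITCH bit in
	 * PIPECONF.
	 */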
44395bfe 4970 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4971 switch (index) {
4972 case DRRS_HIGH_RR:
4973 intel_dp_set_m_n(intel_crtc, M1_N1);
4974 break;
4975 case DRRS_LOW_RR:
4976 intel_dp_set_m_n(intel_crtc, M2_N2);
4977 break;
4978 case DRRS_MAX_RR:
4979 default:
4980 DRM_ERROR("Unsupported refreshrate type\n");
4981 }
4982 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4983 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4984 val = I915_READ(reg);
a4c30b1d 4985
439d7ac0 4986 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4987 if (IS_VALLEYVIEW(dev))
4988 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4989 else
4990 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4991 } else {
6fa7aec1
VK
4992 if (IS_VALLEYVIEW(dev))
4993 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4994 else
4995 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
4996 }
4997 I915_WRITE(reg, val);
4998 }
4999
4e9ac947
VK
5000 dev_priv->drrs.refresh_rate_type = index;
5001
5002 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5003}
5004
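/*
 * Illustrative sketch only (not part of the driver): a caller acting on a
 * user space refresh rate request, as mentioned in the kerneldoc above,
 * would take dev_priv->drrs.mutex around the switch. The helper name below
 * is hypothetical.
 */
#if 0
static void intel_edp_drrs_set_user_rate(struct drm_device *dev,
					 int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->drrs.mutex);
	intel_dp_set_drrs_state(dev, refresh_rate);
	mutex_unlock(&dev_priv->drrs.mutex);
}
#endif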
b33a2815
VK
5005/**
5006 * intel_edp_drrs_enable - init drrs struct if supported
5007 * @intel_dp: DP struct
5008 *
5009 * Initializes frontbuffer_bits and drrs.dp
5010 */
c395578e
VK
5011void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5012{
5013 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5014 struct drm_i915_private *dev_priv = dev->dev_private;
5015 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5016 struct drm_crtc *crtc = dig_port->base.base.crtc;
5017 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5018
5019 if (!intel_crtc->config->has_drrs) {
5020 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5021 return;
5022 }
5023
5024 mutex_lock(&dev_priv->drrs.mutex);
5025 if (WARN_ON(dev_priv->drrs.dp)) {
5026 DRM_ERROR("DRRS already enabled\n");
5027 goto unlock;
5028 }
5029
5030 dev_priv->drrs.busy_frontbuffer_bits = 0;
5031
5032 dev_priv->drrs.dp = intel_dp;
5033
5034unlock:
5035 mutex_unlock(&dev_priv->drrs.mutex);
5036}
5037
b33a2815
VK
5038/**
5039 * intel_edp_drrs_disable - Disable DRRS
5040 * @intel_dp: DP struct
5041 *
5042 */
c395578e
VK
5043void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5044{
5045 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5046 struct drm_i915_private *dev_priv = dev->dev_private;
5047 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5048 struct drm_crtc *crtc = dig_port->base.base.crtc;
5049 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5050
5051 if (!intel_crtc->config->has_drrs)
5052 return;
5053
5054 mutex_lock(&dev_priv->drrs.mutex);
5055 if (!dev_priv->drrs.dp) {
5056 mutex_unlock(&dev_priv->drrs.mutex);
5057 return;
5058 }
5059
5060 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5061 intel_dp_set_drrs_state(dev_priv->dev,
5062 intel_dp->attached_connector->panel.
5063 fixed_mode->vrefresh);
5064
5065 dev_priv->drrs.dp = NULL;
5066 mutex_unlock(&dev_priv->drrs.mutex);
5067
5068 cancel_delayed_work_sync(&dev_priv->drrs.work);
5069}
5070
4e9ac947
VK
5071static void intel_edp_drrs_downclock_work(struct work_struct *work)
5072{
5073 struct drm_i915_private *dev_priv =
5074 container_of(work, typeof(*dev_priv), drrs.work.work);
5075 struct intel_dp *intel_dp;
5076
5077 mutex_lock(&dev_priv->drrs.mutex);
5078
5079 intel_dp = dev_priv->drrs.dp;
5080
5081 if (!intel_dp)
5082 goto unlock;
5083
439d7ac0 5084 /*
4e9ac947
VK
5085 * The delayed work can race with an invalidate hence we need to
5086 * recheck.
439d7ac0
PB
5087 */
5088
4e9ac947
VK
5089 if (dev_priv->drrs.busy_frontbuffer_bits)
5090 goto unlock;
439d7ac0 5091
4e9ac947
VK
5092 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5093 intel_dp_set_drrs_state(dev_priv->dev,
5094 intel_dp->attached_connector->panel.
5095 downclock_mode->vrefresh);
439d7ac0 5096
4e9ac947 5097unlock:
439d7ac0 5098
4e9ac947 5099 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5100}
5101
b33a2815
VK
5102/**
5103 * intel_edp_drrs_invalidate - Invalidate DRRS
5104 * @dev: DRM device
5105 * @frontbuffer_bits: frontbuffer plane tracking bits
5106 *
 5107 * When there is a disturbance on the screen (due to cursor movement, time
 5108 * updates etc.), DRRS needs to be invalidated, i.e. we need to switch to
 5109 * the high RR.
5110 *
5111 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5112 */
a93fad0f
VK
5113void intel_edp_drrs_invalidate(struct drm_device *dev,
5114 unsigned frontbuffer_bits)
5115{
5116 struct drm_i915_private *dev_priv = dev->dev_private;
5117 struct drm_crtc *crtc;
5118 enum pipe pipe;
5119
5120 if (!dev_priv->drrs.dp)
5121 return;
5122
3954e733
R
5123 cancel_delayed_work_sync(&dev_priv->drrs.work);
5124
a93fad0f
VK
5125 mutex_lock(&dev_priv->drrs.mutex);
5126 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5127 pipe = to_intel_crtc(crtc)->pipe;
5128
5129 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5130 intel_dp_set_drrs_state(dev_priv->dev,
5131 dev_priv->drrs.dp->attached_connector->panel.
5132 fixed_mode->vrefresh);
5133 }
5134
5135 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5136
5137 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5138 mutex_unlock(&dev_priv->drrs.mutex);
5139}
5140
b33a2815
VK
5141/**
5142 * intel_edp_drrs_flush - Flush DRRS
5143 * @dev: DRM device
5144 * @frontbuffer_bits: frontbuffer plane tracking bits
5145 *
5146 * When there is no movement on screen, DRRS work can be scheduled.
5147 * This DRRS work is responsible for setting relevant registers after a
5148 * timeout of 1 second.
5149 *
5150 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5151 */
a93fad0f
VK
5152void intel_edp_drrs_flush(struct drm_device *dev,
5153 unsigned frontbuffer_bits)
5154{
5155 struct drm_i915_private *dev_priv = dev->dev_private;
5156 struct drm_crtc *crtc;
5157 enum pipe pipe;
5158
5159 if (!dev_priv->drrs.dp)
5160 return;
5161
3954e733
R
5162 cancel_delayed_work_sync(&dev_priv->drrs.work);
5163
a93fad0f
VK
5164 mutex_lock(&dev_priv->drrs.mutex);
5165 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5166 pipe = to_intel_crtc(crtc)->pipe;
5167 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5168
a93fad0f
VK
5169 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5170 !dev_priv->drrs.busy_frontbuffer_bits)
5171 schedule_delayed_work(&dev_priv->drrs.work,
5172 msecs_to_jiffies(1000));
5173 mutex_unlock(&dev_priv->drrs.mutex);
5174}
5175
b33a2815
VK
5176/**
5177 * DOC: Display Refresh Rate Switching (DRRS)
5178 *
5179 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5180 * which enables switching between low and high refresh rates,
5181 * dynamically, based on the usage scenario. This feature is applicable
5182 * for internal panels.
5183 *
5184 * Indication that the panel supports DRRS is given by the panel EDID, which
5185 * would list multiple refresh rates for one resolution.
5186 *
5187 * DRRS is of 2 types - static and seamless.
5188 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5189 * (may appear as a blink on screen) and is used in dock/undock scenarios.
5190 * Seamless DRRS involves changing RR without any visual effect to the user
5191 * and can be used during normal system usage. This is done by programming
5192 * certain registers.
5193 *
5194 * Support for static/seamless DRRS may be indicated in the VBT based on
5195 * inputs from the panel spec.
5196 *
5197 * DRRS saves power by switching to low RR based on usage scenarios.
5198 *
5199 * eDP DRRS:-
 5200 * The implementation is based on frontbuffer tracking.
5201 * When there is a disturbance on the screen triggered by user activity or a
5202 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5203 * When there is no movement on screen, after a timeout of 1 second, a switch
5204 * to low RR is made.
5205 * For integration with frontbuffer tracking code,
5206 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5207 *
5208 * DRRS can be further extended to support other internal panels and also
5209 * the scenario of video playback wherein RR is set based on the rate
5210 * requested by userspace.
5211 */
5212
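/*
 * Illustrative sketch only (not part of the driver): the frontbuffer
 * tracking code is expected to feed DRRS roughly as below, with
 * frontbuffer_bits identifying the dirtied planes. The caller names are
 * hypothetical.
 */
#if 0
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* screen activity: switch back to the high refresh rate */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void example_frontbuffer_flush_done(struct drm_device *dev,
					   unsigned frontbuffer_bits)
{
	/* activity has settled: allow the 1 second downclock work to run */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
#endif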
5213/**
5214 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5215 * @intel_connector: eDP connector
5216 * @fixed_mode: preferred mode of panel
5217 *
5218 * This function is called only once at driver load to initialize basic
5219 * DRRS stuff.
5220 *
5221 * Returns:
5222 * Downclock mode if panel supports it, else return NULL.
5223 * DRRS support is determined by the presence of downclock mode (apart
5224 * from VBT setting).
5225 */
4f9db5b5 5226static struct drm_display_mode *
96178eeb
VK
5227intel_dp_drrs_init(struct intel_connector *intel_connector,
5228 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5229{
5230 struct drm_connector *connector = &intel_connector->base;
96178eeb 5231 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5232 struct drm_i915_private *dev_priv = dev->dev_private;
5233 struct drm_display_mode *downclock_mode = NULL;
5234
5235 if (INTEL_INFO(dev)->gen <= 6) {
5236 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5237 return NULL;
5238 }
5239
5240 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5241 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5242 return NULL;
5243 }
5244
5245 downclock_mode = intel_find_panel_downclock
5246 (dev, fixed_mode, connector);
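	/*
	 * The downclock mode is the lower refresh rate variant of fixed_mode
	 * that the panel advertises in its EDID (see the DRRS DOC comment
	 * above).
	 */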
5247
5248 if (!downclock_mode) {
a1d26342 5249 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5250 return NULL;
5251 }
5252
4e9ac947
VK
5253 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5254
96178eeb 5255 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5256
96178eeb 5257 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5258
96178eeb 5259 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5260 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5261 return downclock_mode;
5262}
5263
ed92f0b2 5264static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5265 struct intel_connector *intel_connector)
ed92f0b2
PZ
5266{
5267 struct drm_connector *connector = &intel_connector->base;
5268 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5269 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5270 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5273 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5274 bool has_dpcd;
5275 struct drm_display_mode *scan;
5276 struct edid *edid;
6517d273 5277 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5278
96178eeb 5279 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5280
ed92f0b2
PZ
5281 if (!is_edp(intel_dp))
5282 return true;
5283
49e6bc51
VS
5284 pps_lock(intel_dp);
5285 intel_edp_panel_vdd_sanitize(intel_dp);
5286 pps_unlock(intel_dp);
63635217 5287
ed92f0b2 5288 /* Cache DPCD and EDID for edp. */
ed92f0b2 5289 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5290
5291 if (has_dpcd) {
5292 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5293 dev_priv->no_aux_handshake =
5294 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5295 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5296 } else {
5297 /* if this fails, presume the device is a ghost */
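		/* (a "ghost": eDP claimed by the VBT with no panel actually attached) */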
5298 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5299 return false;
5300 }
5301
5302 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5303 pps_lock(intel_dp);
36b5f425 5304 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5305 pps_unlock(intel_dp);
ed92f0b2 5306
060c8778 5307 mutex_lock(&dev->mode_config.mutex);
0b99836f 5308 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5309 if (edid) {
5310 if (drm_add_edid_modes(connector, edid)) {
5311 drm_mode_connector_update_edid_property(connector,
5312 edid);
5313 drm_edid_to_eld(connector, edid);
5314 } else {
5315 kfree(edid);
5316 edid = ERR_PTR(-EINVAL);
5317 }
5318 } else {
5319 edid = ERR_PTR(-ENOENT);
5320 }
5321 intel_connector->edid = edid;
5322
5323 /* prefer fixed mode from EDID if available */
5324 list_for_each_entry(scan, &connector->probed_modes, head) {
5325 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5326 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5327 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5328 intel_connector, fixed_mode);
ed92f0b2
PZ
5329 break;
5330 }
5331 }
5332
5333 /* fallback to VBT if available for eDP */
5334 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5335 fixed_mode = drm_mode_duplicate(dev,
5336 dev_priv->vbt.lfp_lvds_vbt_mode);
5337 if (fixed_mode)
5338 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5339 }
060c8778 5340 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5341
01527b31
CT
5342 if (IS_VALLEYVIEW(dev)) {
5343 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5344 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5345
5346 /*
5347 * Figure out the current pipe for the initial backlight setup.
5348 * If the current pipe isn't valid, try the PPS pipe, and if that
5349 * fails just assume pipe A.
5350 */
5351 if (IS_CHERRYVIEW(dev))
5352 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5353 else
5354 pipe = PORT_TO_PIPE(intel_dp->DP);
5355
5356 if (pipe != PIPE_A && pipe != PIPE_B)
5357 pipe = intel_dp->pps_pipe;
5358
5359 if (pipe != PIPE_A && pipe != PIPE_B)
5360 pipe = PIPE_A;
5361
5362 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5363 pipe_name(pipe));
01527b31
CT
5364 }
5365
4f9db5b5 5366 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5367 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5368 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5369
5370 return true;
5371}
5372
16c25533 5373bool
f0fec3f2
PZ
5374intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5375 struct intel_connector *intel_connector)
a4fc5ed6 5376{
f0fec3f2
PZ
5377 struct drm_connector *connector = &intel_connector->base;
5378 struct intel_dp *intel_dp = &intel_dig_port->dp;
5379 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5380 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5381 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5382 enum port port = intel_dig_port->port;
0b99836f 5383 int type;
a4fc5ed6 5384
a4a5d2f8
VS
5385 intel_dp->pps_pipe = INVALID_PIPE;
5386
ec5b01dd 5387 /* intel_dp vfuncs */
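	/*
	 * Editor's note (assumption): the AUX clock divider derives the AUX
	 * bit clock from a different reference clock on each platform
	 * generation, hence the per platform hooks below.
	 */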
b6b5e383
DL
5388 if (INTEL_INFO(dev)->gen >= 9)
5389 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5390 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5391 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5392 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5393 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5394 else if (HAS_PCH_SPLIT(dev))
5395 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5396 else
5397 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5398
b9ca5fad
DL
5399 if (INTEL_INFO(dev)->gen >= 9)
5400 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5401 else
5402 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5403
0767935e
DV
5404 /* Preserve the current hw state. */
5405 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5406 intel_dp->attached_connector = intel_connector;
3d3dc149 5407
3b32a35b 5408 if (intel_dp_is_edp(dev, port))
b329530c 5409 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5410 else
5411 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5412
f7d24902
ID
5413 /*
5414 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5415 * for DP the encoder type can be set by the caller to
5416 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5417 */
5418 if (type == DRM_MODE_CONNECTOR_eDP)
5419 intel_encoder->type = INTEL_OUTPUT_EDP;
5420
c17ed5b5
VS
5421 /* eDP only on port B and/or C on vlv/chv */
5422 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5423 port != PORT_B && port != PORT_C))
5424 return false;
5425
e7281eab
ID
5426 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5427 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5428 port_name(port));
5429
b329530c 5430 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5431 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5432
a4fc5ed6
KP
5433 connector->interlace_allowed = true;
5434 connector->doublescan_allowed = 0;
5435
f0fec3f2 5436 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5437 edp_panel_vdd_work);
a4fc5ed6 5438
df0e9248 5439 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5440 drm_connector_register(connector);
a4fc5ed6 5441
affa9354 5442 if (HAS_DDI(dev))
bcbc889b
PZ
5443 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5444 else
5445 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5446 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5447
0b99836f 5448 /* Set up the hotplug pin. */
ab9d7c30
PZ
5449 switch (port) {
5450 case PORT_A:
1d843f9d 5451 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5452 break;
5453 case PORT_B:
1d843f9d 5454 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5455 break;
5456 case PORT_C:
1d843f9d 5457 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5458 break;
5459 case PORT_D:
1d843f9d 5460 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5461 break;
5462 default:
ad1c0b19 5463 BUG();
5eb08b69
ZW
5464 }
5465
dada1a9f 5466 if (is_edp(intel_dp)) {
773538e8 5467 pps_lock(intel_dp);
1e74a324
VS
5468 intel_dp_init_panel_power_timestamps(intel_dp);
5469 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5470 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5471 else
36b5f425 5472 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5473 pps_unlock(intel_dp);
dada1a9f 5474 }
0095e6dc 5475
9d1a1031 5476 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5477
0e32b39c 5478 /* init MST on ports that can support it */
c86ea3d0 5479 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5480 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5481 intel_dp_mst_encoder_init(intel_dig_port,
5482 intel_connector->base.base.id);
0e32b39c
DA
5483 }
5484 }
5485
36b5f425 5486 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5487 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5488 if (is_edp(intel_dp)) {
5489 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5490 /*
 5491 * vdd might still be enabled due to the delayed vdd off.
5492 * Make sure vdd is actually turned off here.
5493 */
773538e8 5494 pps_lock(intel_dp);
4be73780 5495 edp_panel_vdd_off_sync(intel_dp);
773538e8 5496 pps_unlock(intel_dp);
15b1d171 5497 }
34ea3d38 5498 drm_connector_unregister(connector);
b2f246a8 5499 drm_connector_cleanup(connector);
16c25533 5500 return false;
b2f246a8 5501 }
32f9d658 5502
f684960e
CW
5503 intel_dp_add_properties(intel_dp, connector);
5504
a4fc5ed6
KP
5505 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5506 * 0xd. Failure to do so will result in spurious interrupts being
5507 * generated on the port when a cable is not attached.
5508 */
5509 if (IS_G4X(dev) && !IS_GM45(dev)) {
5510 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5511 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5512 }
16c25533
PZ
5513
5514 return true;
a4fc5ed6 5515}
f0fec3f2
PZ
5516
5517void
5518intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5519{
13cf5504 5520 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5521 struct intel_digital_port *intel_dig_port;
5522 struct intel_encoder *intel_encoder;
5523 struct drm_encoder *encoder;
5524 struct intel_connector *intel_connector;
5525
b14c5679 5526 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5527 if (!intel_dig_port)
5528 return;
5529
b14c5679 5530 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5531 if (!intel_connector) {
5532 kfree(intel_dig_port);
5533 return;
5534 }
5535
5536 intel_encoder = &intel_dig_port->base;
5537 encoder = &intel_encoder->base;
5538
5539 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5540 DRM_MODE_ENCODER_TMDS);
5541
5bfe2ac0 5542 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5543 intel_encoder->disable = intel_disable_dp;
00c09d70 5544 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5545 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5546 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5547 if (IS_CHERRYVIEW(dev)) {
9197c88b 5548 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5549 intel_encoder->pre_enable = chv_pre_enable_dp;
5550 intel_encoder->enable = vlv_enable_dp;
580d3811 5551 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5552 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5553 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5554 intel_encoder->pre_enable = vlv_pre_enable_dp;
5555 intel_encoder->enable = vlv_enable_dp;
49277c31 5556 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5557 } else {
ecff4f3b
JN
5558 intel_encoder->pre_enable = g4x_pre_enable_dp;
5559 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5560 if (INTEL_INFO(dev)->gen >= 5)
5561 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5562 }
f0fec3f2 5563
174edf1f 5564 intel_dig_port->port = port;
f0fec3f2
PZ
5565 intel_dig_port->dp.output_reg = output_reg;
5566
00c09d70 5567 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5568 if (IS_CHERRYVIEW(dev)) {
5569 if (port == PORT_D)
5570 intel_encoder->crtc_mask = 1 << 2;
5571 else
5572 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5573 } else {
5574 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5575 }
bc079e8b 5576 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5577 intel_encoder->hot_plug = intel_dp_hot_plug;
5578
13cf5504
DA
5579 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5580 dev_priv->hpd_irq_port[port] = intel_dig_port;
5581
15b1d171
PZ
5582 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5583 drm_encoder_cleanup(encoder);
5584 kfree(intel_dig_port);
b2f246a8 5585 kfree(intel_connector);
15b1d171 5586 }
f0fec3f2 5587}
0e32b39c
DA
5588
5589void intel_dp_mst_suspend(struct drm_device *dev)
5590{
5591 struct drm_i915_private *dev_priv = dev->dev_private;
5592 int i;
5593
5594 /* disable MST */
5595 for (i = 0; i < I915_MAX_PORTS; i++) {
5596 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5597 if (!intel_dig_port)
5598 continue;
5599
5600 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5601 if (!intel_dig_port->dp.can_mst)
5602 continue;
5603 if (intel_dig_port->dp.is_mst)
5604 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5605 }
5606 }
5607}
5608
5609void intel_dp_mst_resume(struct drm_device *dev)
5610{
5611 struct drm_i915_private *dev_priv = dev->dev_private;
5612 int i;
5613
5614 for (i = 0; i < I915_MAX_PORTS; i++) {
5615 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5616 if (!intel_dig_port)
5617 continue;
5618 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5619 int ret;
5620
5621 if (!intel_dig_port->dp.can_mst)
5622 continue;
5623
5624 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
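			/*
			 * Editor's note (assumption): if the topology did not
			 * come back after suspend, fall back to the regular
			 * MST status check, which presumably tears the MST
			 * state down again.
			 */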
5625 if (ret != 0) {
5626 intel_dp_check_mst_status(&intel_dig_port->dp);
5627 }
5628 }
5629 }
5630}