drm/i915: Fully separate source vs. sink rates
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4, which allows more link rates.
72 * Only the fixed rates are listed below; variable rates are excluded.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires programming fractional division for m2.
77 * m2 is stored in fixed-point format using the formula below:
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
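A quick, purely illustrative check of the fixed-point m2 encoding described above (not part of the driver logic):

/*
 * Illustrative only: for the 1.62GHz entry, (32 << 22) | 1677722 ==
 * 0x8000000 | 0x19999a == 0x819999a, which matches the .m2 value above.
 */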
a8f3ef61 87/* Skylake supports the following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 91
cfcb0fc9
JB
92/**
93 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94 * @intel_dp: DP struct
95 *
96 * If a CPU or PCH DP output is attached to an eDP panel, this function
97 * will return true, and false otherwise.
98 */
99static bool is_edp(struct intel_dp *intel_dp)
100{
da63a9f2
PZ
101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
104}
105
68b4d824 106static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 107{
68b4d824
ID
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
111}
112
df0e9248
CW
113static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114{
fa90ecef 115 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
116}
117
ea5b213a 118static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 119static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 120static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 121static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
122static void vlv_steal_power_sequencer(struct drm_device *dev,
123 enum pipe pipe);
a4fc5ed6 124
0e32b39c 125int
ea5b213a 126intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 127{
7183dc29 128 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
129
130 switch (max_link_bw) {
131 case DP_LINK_BW_1_62:
132 case DP_LINK_BW_2_7:
1db10e28 133 case DP_LINK_BW_5_4:
d4eead50 134 break;
a4fc5ed6 135 default:
d4eead50
ID
136 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137 max_link_bw);
a4fc5ed6
KP
138 max_link_bw = DP_LINK_BW_1_62;
139 break;
140 }
141 return max_link_bw;
142}
143
eeb6324d
PZ
144static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145{
146 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147 struct drm_device *dev = intel_dig_port->base.base.dev;
148 u8 source_max, sink_max;
149
150 source_max = 4;
151 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153 source_max = 2;
154
155 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157 return min(source_max, sink_max);
158}
159
cd9dde44
AJ
160/*
161 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec.
163 *
164 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165 *
166 * 270000 * 1 * 8 / 10 == 216000
167 *
168 * The actual data capacity of that configuration is 2.16Gbit/s, so the
169 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
170 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171 * 119000. At 18bpp that's 2142000 kilobits per second.
172 *
173 * Thus the strange-looking division by 10 in intel_dp_link_required, to
174 * get the result in decakilobits instead of kilobits.
175 */
176
a4fc5ed6 177static int
c898261c 178intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 179{
cd9dde44 180 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
181}
182
fe27d53e
DA
183static int
184intel_dp_max_data_rate(int max_link_clock, int max_lanes)
185{
186 return (max_link_clock * max_lanes * 8) / 10;
187}
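A worked example of the two helpers above, reusing the 1680x1050R case from the comment (illustrative values only, not driver logic):

/* Hypothetical numbers: 119000 kHz pixel clock, 18bpp, 2 lanes of 2.7GHz. */
int mode_rate = intel_dp_link_required(119000, 18);	/* 214200 */
int max_rate = intel_dp_max_data_rate(270000, 2);	/* 432000 */
/* mode_rate <= max_rate, so this mode fits within the link bandwidth. */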
188
c19de8eb 189static enum drm_mode_status
a4fc5ed6
KP
190intel_dp_mode_valid(struct drm_connector *connector,
191 struct drm_display_mode *mode)
192{
df0e9248 193 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
194 struct intel_connector *intel_connector = to_intel_connector(connector);
195 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
196 int target_clock = mode->clock;
197 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 198
dd06f90e
JN
199 if (is_edp(intel_dp) && fixed_mode) {
200 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
201 return MODE_PANEL;
202
dd06f90e 203 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 204 return MODE_PANEL;
03afc4a2
DV
205
206 target_clock = fixed_mode->clock;
7de56f43
ZY
207 }
208
36008365 209 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
eeb6324d 210 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
211
212 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213 mode_rate = intel_dp_link_required(target_clock, 18);
214
215 if (mode_rate > max_rate)
c4867936 216 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
217
218 if (mode->clock < 10000)
219 return MODE_CLOCK_LOW;
220
0af78a2b
DV
221 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222 return MODE_H_ILLEGAL;
223
a4fc5ed6
KP
224 return MODE_OK;
225}
226
a4f1289e 227uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
228{
229 int i;
230 uint32_t v = 0;
231
232 if (src_bytes > 4)
233 src_bytes = 4;
234 for (i = 0; i < src_bytes; i++)
235 v |= ((uint32_t) src[i]) << ((3-i) * 8);
236 return v;
237}
238
c2af70e2 239static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
240{
241 int i;
242 if (dst_bytes > 4)
243 dst_bytes = 4;
244 for (i = 0; i < dst_bytes; i++)
245 dst[i] = src >> ((3-i) * 8);
246}
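A small, hedged illustration of the byte ordering used by the pack/unpack helpers above (made-up values):

uint8_t buf[3] = { 0x11, 0x22, 0x33 };
uint32_t v = intel_dp_pack_aux(buf, 3);	/* 0x11223300: first byte lands in the MSB */
intel_dp_unpack_aux(v, buf, 3);		/* round-trips back to 11 22 33 */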
247
fb0f8fbf
KP
248/* hrawclock is 1/4 the FSB frequency */
249static int
250intel_hrawclk(struct drm_device *dev)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t clkcfg;
254
9473c8f4
VP
255 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256 if (IS_VALLEYVIEW(dev))
257 return 200;
258
fb0f8fbf
KP
259 clkcfg = I915_READ(CLKCFG);
260 switch (clkcfg & CLKCFG_FSB_MASK) {
261 case CLKCFG_FSB_400:
262 return 100;
263 case CLKCFG_FSB_533:
264 return 133;
265 case CLKCFG_FSB_667:
266 return 166;
267 case CLKCFG_FSB_800:
268 return 200;
269 case CLKCFG_FSB_1067:
270 return 266;
271 case CLKCFG_FSB_1333:
272 return 333;
273 /* these two are just a guess; one of them might be right */
274 case CLKCFG_FSB_1600:
275 case CLKCFG_FSB_1600_ALT:
276 return 400;
277 default:
278 return 133;
279 }
280}
281
bf13e81b
JN
282static void
283intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 284 struct intel_dp *intel_dp);
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b 288
773538e8
VS
289static void pps_lock(struct intel_dp *intel_dp)
290{
291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
292 struct intel_encoder *encoder = &intel_dig_port->base;
293 struct drm_device *dev = encoder->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 enum intel_display_power_domain power_domain;
296
297 /*
298 * See vlv_power_sequencer_reset() for why we need
299 * a power domain reference here.
300 */
301 power_domain = intel_display_port_power_domain(encoder);
302 intel_display_power_get(dev_priv, power_domain);
303
304 mutex_lock(&dev_priv->pps_mutex);
305}
306
307static void pps_unlock(struct intel_dp *intel_dp)
308{
309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
310 struct intel_encoder *encoder = &intel_dig_port->base;
311 struct drm_device *dev = encoder->base.dev;
312 struct drm_i915_private *dev_priv = dev->dev_private;
313 enum intel_display_power_domain power_domain;
314
315 mutex_unlock(&dev_priv->pps_mutex);
316
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_put(dev_priv, power_domain);
319}
320
961a0db0
VS
321static void
322vlv_power_sequencer_kick(struct intel_dp *intel_dp)
323{
324 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
325 struct drm_device *dev = intel_dig_port->base.base.dev;
326 struct drm_i915_private *dev_priv = dev->dev_private;
327 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 328 bool pll_enabled;
961a0db0
VS
329 uint32_t DP;
330
331 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
332 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
333 pipe_name(pipe), port_name(intel_dig_port->port)))
334 return;
335
336 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
337 pipe_name(pipe), port_name(intel_dig_port->port));
338
339 /* Preserve the BIOS-computed detected bit. This is
340 * supposed to be read-only.
341 */
342 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
343 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
344 DP |= DP_PORT_WIDTH(1);
345 DP |= DP_LINK_TRAIN_PAT_1;
346
347 if (IS_CHERRYVIEW(dev))
348 DP |= DP_PIPE_SELECT_CHV(pipe);
349 else if (pipe == PIPE_B)
350 DP |= DP_PIPEB_SELECT;
351
d288f65f
VS
352 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
353
354 /*
355 * The DPLL for the pipe must be enabled for this to work.
356 * So enable it temporarily if it's not already enabled.
357 */
358 if (!pll_enabled)
359 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
360 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
361
961a0db0
VS
362 /*
363 * Similar magic as in intel_dp_enable_port().
364 * We _must_ do this port enable + disable trick
365 * to make this power sequencer lock onto the port.
366 * Otherwise even the VDD force bit won't work.
367 */
368 I915_WRITE(intel_dp->output_reg, DP);
369 POSTING_READ(intel_dp->output_reg);
370
371 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
376
377 if (!pll_enabled)
378 vlv_force_pll_off(dev, pipe);
961a0db0
VS
379}
380
bf13e81b
JN
381static enum pipe
382vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
383{
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
385 struct drm_device *dev = intel_dig_port->base.base.dev;
386 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
387 struct intel_encoder *encoder;
388 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 389 enum pipe pipe;
bf13e81b 390
e39b999a 391 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 392
a8c3344e
VS
393 /* We should never land here with regular DP ports */
394 WARN_ON(!is_edp(intel_dp));
395
a4a5d2f8
VS
396 if (intel_dp->pps_pipe != INVALID_PIPE)
397 return intel_dp->pps_pipe;
398
399 /*
400 * We don't have a power sequencer assigned yet.
401 * Pick one that's not used by another port.
402 */
403 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
404 base.head) {
405 struct intel_dp *tmp;
406
407 if (encoder->type != INTEL_OUTPUT_EDP)
408 continue;
409
410 tmp = enc_to_intel_dp(&encoder->base);
411
412 if (tmp->pps_pipe != INVALID_PIPE)
413 pipes &= ~(1 << tmp->pps_pipe);
414 }
415
416 /*
417 * Didn't find one. This should not happen since there
418 * are two power sequencers and up to two eDP ports.
419 */
420 if (WARN_ON(pipes == 0))
a8c3344e
VS
421 pipe = PIPE_A;
422 else
423 pipe = ffs(pipes) - 1;
a4a5d2f8 424
a8c3344e
VS
425 vlv_steal_power_sequencer(dev, pipe);
426 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
427
428 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
429 pipe_name(intel_dp->pps_pipe),
430 port_name(intel_dig_port->port));
431
432 /* init power sequencer on this pipe and port */
36b5f425
VS
433 intel_dp_init_panel_power_sequencer(dev, intel_dp);
434 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 435
961a0db0
VS
436 /*
437 * Even vdd force doesn't work until we've made
438 * the power sequencer lock onto the port.
439 */
440 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
441
442 return intel_dp->pps_pipe;
443}
444
6491ab27
VS
445typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446 enum pipe pipe);
447
448static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452}
453
454static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455 enum pipe pipe)
456{
457 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458}
459
460static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
461 enum pipe pipe)
462{
463 return true;
464}
bf13e81b 465
a4a5d2f8 466static enum pipe
6491ab27
VS
467vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468 enum port port,
469 vlv_pipe_check pipe_check)
a4a5d2f8
VS
470{
471 enum pipe pipe;
bf13e81b 472
bf13e81b
JN
473 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
476
477 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478 continue;
479
6491ab27
VS
480 if (!pipe_check(dev_priv, pipe))
481 continue;
482
a4a5d2f8 483 return pipe;
bf13e81b
JN
484 }
485
a4a5d2f8
VS
486 return INVALID_PIPE;
487}
488
489static void
490vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
491{
492 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
493 struct drm_device *dev = intel_dig_port->base.base.dev;
494 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
495 enum port port = intel_dig_port->port;
496
497 lockdep_assert_held(&dev_priv->pps_mutex);
498
499 /* try to find a pipe with this port selected */
6491ab27
VS
500 /* first pick one where the panel is on */
501 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
502 vlv_pipe_has_pp_on);
503 /* didn't find one? pick one where vdd is on */
504 if (intel_dp->pps_pipe == INVALID_PIPE)
505 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 vlv_pipe_has_vdd_on);
507 /* didn't find one? pick one with just the correct port */
508 if (intel_dp->pps_pipe == INVALID_PIPE)
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_any);
a4a5d2f8
VS
511
512 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
513 if (intel_dp->pps_pipe == INVALID_PIPE) {
514 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
515 port_name(port));
516 return;
bf13e81b
JN
517 }
518
a4a5d2f8
VS
519 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
520 port_name(port), pipe_name(intel_dp->pps_pipe));
521
36b5f425
VS
522 intel_dp_init_panel_power_sequencer(dev, intel_dp);
523 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
524}
525
773538e8
VS
526void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
527{
528 struct drm_device *dev = dev_priv->dev;
529 struct intel_encoder *encoder;
530
531 if (WARN_ON(!IS_VALLEYVIEW(dev)))
532 return;
533
534 /*
535 * We can't grab pps_mutex here due to deadlock with power_domain
536 * mutex when power_domain functions are called while holding pps_mutex.
537 * That also means that in order to use pps_pipe the code needs to
538 * hold both a power domain reference and pps_mutex, and the power domain
539 * reference get/put must be done while _not_ holding pps_mutex.
540 * pps_{lock,unlock}() do these steps in the correct order, so they
541 * should always be used.
542 */
543
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
545 struct intel_dp *intel_dp;
546
547 if (encoder->type != INTEL_OUTPUT_EDP)
548 continue;
549
550 intel_dp = enc_to_intel_dp(&encoder->base);
551 intel_dp->pps_pipe = INVALID_PIPE;
552 }
bf13e81b
JN
553}
554
555static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556{
557 struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559 if (HAS_PCH_SPLIT(dev))
560 return PCH_PP_CONTROL;
561 else
562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563}
564
565static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566{
567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569 if (HAS_PCH_SPLIT(dev))
570 return PCH_PP_STATUS;
571 else
572 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573}
574
01527b31
CT
575/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
576 This function is only applicable when the panel PM state is not tracked. */
577static int edp_notify_handler(struct notifier_block *this, unsigned long code,
578 void *unused)
579{
580 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
581 edp_notifier);
582 struct drm_device *dev = intel_dp_to_dev(intel_dp);
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 u32 pp_div;
585 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
586
587 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 return 0;
589
773538e8 590 pps_lock(intel_dp);
e39b999a 591
01527b31 592 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
593 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
594
01527b31
CT
595 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
596 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
597 pp_div = I915_READ(pp_div_reg);
598 pp_div &= PP_REFERENCE_DIVIDER_MASK;
599
600 /* 0x1F write to PP_DIV_REG sets max cycle delay */
601 I915_WRITE(pp_div_reg, pp_div | 0x1F);
602 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
603 msleep(intel_dp->panel_power_cycle_delay);
604 }
605
773538e8 606 pps_unlock(intel_dp);
e39b999a 607
01527b31
CT
608 return 0;
609}
610
4be73780 611static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 612{
30add22d 613 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
614 struct drm_i915_private *dev_priv = dev->dev_private;
615
e39b999a
VS
616 lockdep_assert_held(&dev_priv->pps_mutex);
617
9a42356b
VS
618 if (IS_VALLEYVIEW(dev) &&
619 intel_dp->pps_pipe == INVALID_PIPE)
620 return false;
621
bf13e81b 622 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
623}
624
4be73780 625static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 626{
30add22d 627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
628 struct drm_i915_private *dev_priv = dev->dev_private;
629
e39b999a
VS
630 lockdep_assert_held(&dev_priv->pps_mutex);
631
9a42356b
VS
632 if (IS_VALLEYVIEW(dev) &&
633 intel_dp->pps_pipe == INVALID_PIPE)
634 return false;
635
773538e8 636 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
637}
638
9b984dae
KP
639static void
640intel_dp_check_edp(struct intel_dp *intel_dp)
641{
30add22d 642 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 643 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 644
9b984dae
KP
645 if (!is_edp(intel_dp))
646 return;
453c5420 647
4be73780 648 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
649 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
651 I915_READ(_pp_stat_reg(intel_dp)),
652 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
653 }
654}
655
9ee32fea
DV
656static uint32_t
657intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
658{
659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
660 struct drm_device *dev = intel_dig_port->base.base.dev;
661 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 662 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
663 uint32_t status;
664 bool done;
665
ef04f00d 666#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 667 if (has_aux_irq)
b18ac466 668 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 669 msecs_to_jiffies_timeout(10));
9ee32fea
DV
670 else
671 done = wait_for_atomic(C, 10) == 0;
672 if (!done)
673 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
674 has_aux_irq);
675#undef C
676
677 return status;
678}
679
ec5b01dd 680static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 681{
174edf1f
PZ
682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 684
ec5b01dd
DL
685 /*
686 * The clock divider is based on the hrawclk, and would like to run at
687 * 2MHz. So, take the hrawclk value and divide it by 2 and use that.
a4fc5ed6 688 */
ec5b01dd
DL
689 return index ? 0 : intel_hrawclk(dev) / 2;
690}
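A hedged example of the divider arithmetic above, assuming a 200 MHz hrawclk:

/*
 * Illustrative only: intel_hrawclk() returning 200 (MHz) gives a divider
 * of 200 / 2 = 100, which produces the ~2MHz AUX bit clock mentioned in
 * the comment above.
 */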
691
692static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693{
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
696
697 if (index)
698 return 0;
699
700 if (intel_dig_port->port == PORT_A) {
701 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 702 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 703 else
b84a1cf8 704 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
705 } else {
706 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707 }
708}
709
710static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711{
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715
716 if (intel_dig_port->port == PORT_A) {
717 if (index)
718 return 0;
719 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
720 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721 /* Workaround for non-ULT HSW */
bc86625a
CW
722 switch (index) {
723 case 0: return 63;
724 case 1: return 72;
725 default: return 0;
726 }
ec5b01dd 727 } else {
bc86625a 728 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 729 }
b84a1cf8
RV
730}
731
ec5b01dd
DL
732static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733{
734 return index ? 0 : 100;
735}
736
b6b5e383
DL
737static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738{
739 /*
740 * SKL doesn't need us to program the AUX clock divider (Hardware will
741 * derive the clock from CDCLK automatically). We still implement the
742 * get_aux_clock_divider vfunc to plug into the existing code.
743 */
744 return index ? 0 : 1;
745}
746
5ed12a19
DL
747static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
748 bool has_aux_irq,
749 int send_bytes,
750 uint32_t aux_clock_divider)
751{
752 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
753 struct drm_device *dev = intel_dig_port->base.base.dev;
754 uint32_t precharge, timeout;
755
756 if (IS_GEN6(dev))
757 precharge = 3;
758 else
759 precharge = 5;
760
761 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
762 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
763 else
764 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
765
766 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 767 DP_AUX_CH_CTL_DONE |
5ed12a19 768 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 769 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 770 timeout |
788d4433 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 774 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
775}
776
b9ca5fad
DL
777static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778 bool has_aux_irq,
779 int send_bytes,
780 uint32_t unused)
781{
782 return DP_AUX_CH_CTL_SEND_BUSY |
783 DP_AUX_CH_CTL_DONE |
784 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785 DP_AUX_CH_CTL_TIME_OUT_ERROR |
786 DP_AUX_CH_CTL_TIME_OUT_1600us |
787 DP_AUX_CH_CTL_RECEIVE_ERROR |
788 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790}
791
b84a1cf8
RV
792static int
793intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 794 const uint8_t *send, int send_bytes,
b84a1cf8
RV
795 uint8_t *recv, int recv_size)
796{
797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
798 struct drm_device *dev = intel_dig_port->base.base.dev;
799 struct drm_i915_private *dev_priv = dev->dev_private;
800 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
801 uint32_t ch_data = ch_ctl + 4;
bc86625a 802 uint32_t aux_clock_divider;
b84a1cf8
RV
803 int i, ret, recv_bytes;
804 uint32_t status;
5ed12a19 805 int try, clock = 0;
4e6b788c 806 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
807 bool vdd;
808
773538e8 809 pps_lock(intel_dp);
e39b999a 810
72c3500a
VS
811 /*
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
815 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
815 * ourselves.
816 */
1e0560e0 817 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
818
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
821 * deep sleep states.
822 */
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
824
825 intel_dp_check_edp(intel_dp);
5eb08b69 826
c67a470b
PZ
827 intel_aux_display_runtime_get(dev_priv);
828
11bee43e
JB
829 /* Try to wait for any previous AUX channel activity */
830 for (try = 0; try < 3; try++) {
ef04f00d 831 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
832 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
833 break;
834 msleep(1);
835 }
836
837 if (try == 3) {
838 WARN(1, "dp_aux_ch not started status 0x%08x\n",
839 I915_READ(ch_ctl));
9ee32fea
DV
840 ret = -EBUSY;
841 goto out;
4f7f7b7e
CW
842 }
843
46a5ae9f
PZ
844 /* Only 5 data registers! */
845 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
846 ret = -E2BIG;
847 goto out;
848 }
849
ec5b01dd 850 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
851 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
852 has_aux_irq,
853 send_bytes,
854 aux_clock_divider);
5ed12a19 855
bc86625a
CW
856 /* Must try at least 3 times according to DP spec */
857 for (try = 0; try < 5; try++) {
858 /* Load the send data into the aux channel data registers */
859 for (i = 0; i < send_bytes; i += 4)
860 I915_WRITE(ch_data + i,
a4f1289e
RV
861 intel_dp_pack_aux(send + i,
862 send_bytes - i));
bc86625a
CW
863
864 /* Send the command and wait for it to complete */
5ed12a19 865 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
866
867 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
868
869 /* Clear done status and any errors */
870 I915_WRITE(ch_ctl,
871 status |
872 DP_AUX_CH_CTL_DONE |
873 DP_AUX_CH_CTL_TIME_OUT_ERROR |
874 DP_AUX_CH_CTL_RECEIVE_ERROR);
875
876 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR))
878 continue;
879 if (status & DP_AUX_CH_CTL_DONE)
880 break;
881 }
4f7f7b7e 882 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
883 break;
884 }
885
a4fc5ed6 886 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 887 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
888 ret = -EBUSY;
889 goto out;
a4fc5ed6
KP
890 }
891
892 /* Check for timeout or receive error.
893 * Timeouts occur when the sink is not connected
894 */
a5b3da54 895 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 896 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
897 ret = -EIO;
898 goto out;
a5b3da54 899 }
1ae8c0a5
KP
900
901 /* Timeouts occur when the device isn't connected, so they're
902 * "normal" -- don't fill the kernel log with these */
a5b3da54 903 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 904 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
905 ret = -ETIMEDOUT;
906 goto out;
a4fc5ed6
KP
907 }
908
909 /* Unload any bytes sent back from the other side */
910 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
911 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
912 if (recv_bytes > recv_size)
913 recv_bytes = recv_size;
0206e353 914
4f7f7b7e 915 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
916 intel_dp_unpack_aux(I915_READ(ch_data + i),
917 recv + i, recv_bytes - i);
a4fc5ed6 918
9ee32fea
DV
919 ret = recv_bytes;
920out:
921 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 922 intel_aux_display_runtime_put(dev_priv);
9ee32fea 923
884f19e9
JN
924 if (vdd)
925 edp_panel_vdd_off(intel_dp, false);
926
773538e8 927 pps_unlock(intel_dp);
e39b999a 928
9ee32fea 929 return ret;
a4fc5ed6
KP
930}
931
a6c8aff0
JN
932#define BARE_ADDRESS_SIZE 3
933#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
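For reference, a hedged sketch of the 4-byte header that intel_dp_aux_transfer() below builds (values are illustrative):

/*
 * Illustrative only: a 16-byte DP_AUX_NATIVE_READ of DPCD address 0x000
 * produces the header { 0x90, 0x00, 0x00, 0x0f }: request << 4, address
 * high byte, address low byte, and size - 1.
 */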
9d1a1031
JN
934static ssize_t
935intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 936{
9d1a1031
JN
937 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938 uint8_t txbuf[20], rxbuf[20];
939 size_t txsize, rxsize;
a4fc5ed6 940 int ret;
a4fc5ed6 941
9d1a1031
JN
942 txbuf[0] = msg->request << 4;
943 txbuf[1] = msg->address >> 8;
944 txbuf[2] = msg->address & 0xff;
945 txbuf[3] = msg->size - 1;
46a5ae9f 946
9d1a1031
JN
947 switch (msg->request & ~DP_AUX_I2C_MOT) {
948 case DP_AUX_NATIVE_WRITE:
949 case DP_AUX_I2C_WRITE:
a6c8aff0 950 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 951 rxsize = 1;
f51a44b9 952
9d1a1031
JN
953 if (WARN_ON(txsize > 20))
954 return -E2BIG;
a4fc5ed6 955
9d1a1031 956 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 957
9d1a1031
JN
958 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959 if (ret > 0) {
960 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 961
9d1a1031
JN
962 /* Return payload size. */
963 ret = msg->size;
964 }
965 break;
46a5ae9f 966
9d1a1031
JN
967 case DP_AUX_NATIVE_READ:
968 case DP_AUX_I2C_READ:
a6c8aff0 969 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 970 rxsize = msg->size + 1;
a4fc5ed6 971
9d1a1031
JN
972 if (WARN_ON(rxsize > 20))
973 return -E2BIG;
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
978 /*
979 * Assume happy day, and copy the data. The caller is
980 * expected to check msg->reply before touching it.
981 *
982 * Return payload size.
983 */
984 ret--;
985 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 986 }
9d1a1031
JN
987 break;
988
989 default:
990 ret = -EINVAL;
991 break;
a4fc5ed6 992 }
f51a44b9 993
9d1a1031 994 return ret;
a4fc5ed6
KP
995}
996
9d1a1031
JN
997static void
998intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
999{
1000 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1001 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1002 enum port port = intel_dig_port->port;
0b99836f 1003 const char *name = NULL;
ab2c0672
DA
1004 int ret;
1005
33ad6626
JN
1006 switch (port) {
1007 case PORT_A:
1008 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1009 name = "DPDDC-A";
ab2c0672 1010 break;
33ad6626
JN
1011 case PORT_B:
1012 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1013 name = "DPDDC-B";
ab2c0672 1014 break;
33ad6626
JN
1015 case PORT_C:
1016 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1017 name = "DPDDC-C";
ab2c0672 1018 break;
33ad6626
JN
1019 case PORT_D:
1020 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1021 name = "DPDDC-D";
33ad6626
JN
1022 break;
1023 default:
1024 BUG();
ab2c0672
DA
1025 }
1026
1b1aad75
DL
1027 /*
1028 * The AUX_CTL register is usually DP_CTL + 0x10.
1029 *
1030 * On Haswell and Broadwell though:
1031 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1032 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1033 *
1034 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1035 */
1036 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1037 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1038
0b99836f 1039 intel_dp->aux.name = name;
9d1a1031
JN
1040 intel_dp->aux.dev = dev->dev;
1041 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1042
0b99836f
JN
1043 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1044 connector->base.kdev->kobj.name);
8316f337 1045
4f71d0cb 1046 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1047 if (ret < 0) {
4f71d0cb 1048 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1049 name, ret);
1050 return;
ab2c0672 1051 }
8a5e6aeb 1052
0b99836f
JN
1053 ret = sysfs_create_link(&connector->base.kdev->kobj,
1054 &intel_dp->aux.ddc.dev.kobj,
1055 intel_dp->aux.ddc.dev.kobj.name);
1056 if (ret < 0) {
1057 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1058 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1059 }
a4fc5ed6
KP
1060}
1061
80f65de3
ID
1062static void
1063intel_dp_connector_unregister(struct intel_connector *intel_connector)
1064{
1065 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1066
0e32b39c
DA
1067 if (!intel_connector->mst_port)
1068 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1069 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1070 intel_connector_unregister(intel_connector);
1071}
1072
5416d871 1073static void
c3346ef6 1074skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1075{
1076 u32 ctrl1;
1077
1078 pipe_config->ddi_pll_sel = SKL_DPLL0;
1079 pipe_config->dpll_hw_state.cfgcr1 = 0;
1080 pipe_config->dpll_hw_state.cfgcr2 = 0;
1081
1082 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1083 switch (link_clock / 2) {
1084 case 81000:
5416d871
DL
1085 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1086 SKL_DPLL0);
1087 break;
c3346ef6 1088 case 135000:
5416d871
DL
1089 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1090 SKL_DPLL0);
1091 break;
c3346ef6 1092 case 270000:
5416d871
DL
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1094 SKL_DPLL0);
1095 break;
c3346ef6
SJ
1096 case 162000:
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1098 SKL_DPLL0);
1099 break;
1100 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1101 results in a CDCLK change. The CDCLK change needs to be handled by
1102 disabling the pipes and re-enabling them. */
1103 case 108000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1105 SKL_DPLL0);
1106 break;
1107 case 216000:
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1109 SKL_DPLL0);
1110 break;
1111
5416d871
DL
1112 }
1113 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1114}
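A hedged example of the mapping above:

/*
 * Illustrative only: a 540000 kHz link clock (HBR2) divides down to the
 * "case 270000" branch above and selects DPLL_CRTL1_LINK_RATE_2700.
 */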
1115
0e50338c 1116static void
5cec258b 1117hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1118{
1119 switch (link_bw) {
1120 case DP_LINK_BW_1_62:
1121 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122 break;
1123 case DP_LINK_BW_2_7:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125 break;
1126 case DP_LINK_BW_5_4:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128 break;
1129 }
1130}
1131
fc0f8e25 1132static int
12f6a2e2 1133intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1134{
12f6a2e2
VS
1135 if (intel_dp->num_supported_rates) {
1136 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1137 return intel_dp->num_supported_rates;
fc0f8e25 1138 }
12f6a2e2
VS
1139
1140 *sink_rates = default_rates;
1141
1142 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1143}
1144
a8f3ef61 1145static int
1db10e28 1146intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1147{
636280ba
VS
1148 if (INTEL_INFO(dev)->gen >= 9) {
1149 *source_rates = gen9_rates;
1150 return ARRAY_SIZE(gen9_rates);
a8f3ef61 1151 }
636280ba
VS
1152
1153 *source_rates = default_rates;
1154
1db10e28
VS
1155 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156 /* WaDisableHBR2:skl */
1157 return (DP_LINK_BW_2_7 >> 3) + 1;
1158 else if (INTEL_INFO(dev)->gen >= 8 ||
1159 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160 return (DP_LINK_BW_5_4 >> 3) + 1;
1161 else
1162 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1163}
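A hedged note on the (code >> 3) + 1 trick used by both rate helpers above:

/*
 * Illustrative only: the link-bw codes are 0x06 (1.62GHz), 0x0a (2.7GHz)
 * and 0x14 (5.4GHz), so (code >> 3) yields 0, 1, 2 and (code >> 3) + 1 is
 * the number of usable entries in default_rates[].
 */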
1164
c6bb3538
DV
1165static void
1166intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1167 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1168{
1169 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1170 const struct dp_link_dpll *divisor = NULL;
1171 int i, count = 0;
c6bb3538
DV
1172
1173 if (IS_G4X(dev)) {
9dd4ffdf
CML
1174 divisor = gen4_dpll;
1175 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1176 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1177 divisor = pch_dpll;
1178 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1179 } else if (IS_CHERRYVIEW(dev)) {
1180 divisor = chv_dpll;
1181 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1182 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1183 divisor = vlv_dpll;
1184 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1185 }
9dd4ffdf
CML
1186
1187 if (divisor && count) {
1188 for (i = 0; i < count; i++) {
1189 if (link_bw == divisor[i].link_bw) {
1190 pipe_config->dpll = divisor[i].dpll;
1191 pipe_config->clock_set = true;
1192 break;
1193 }
1194 }
c6bb3538
DV
1195 }
1196}
1197
f4896f15
VS
1198static int intel_supported_rates(const int *source_rates, int source_len,
1199 const int *sink_rates, int sink_len,
1200 int *supported_rates)
a8f3ef61
SJ
1201{
1202 int i = 0, j = 0, k = 0;
1203
a8f3ef61
SJ
1204 while (i < source_len && j < sink_len) {
1205 if (source_rates[i] == sink_rates[j]) {
1206 supported_rates[k] = source_rates[i];
1207 ++k;
1208 ++i;
1209 ++j;
1210 } else if (source_rates[i] < sink_rates[j]) {
1211 ++i;
1212 } else {
1213 ++j;
1214 }
1215 }
1216 return k;
1217}
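A small usage sketch of the merge-style intersection above (made-up rate arrays, not from the driver):

/* Illustrative only: both inputs must be ascending, as the rate tables are. */
static const int src[] = { 162000, 270000, 540000 };
static const int snk[] = { 162000, 216000, 270000, 324000 };
int common[8];
int n = intel_supported_rates(src, ARRAY_SIZE(src),
			      snk, ARRAY_SIZE(snk), common);
/* n == 2 and common[] starts with { 162000, 270000 }. */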
1218
f4896f15 1219static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1220{
1221 int i = 0;
1222
1223 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1224 if (find == rates[i])
1225 break;
1226
1227 return i;
1228}
1229
00c09d70 1230bool
5bfe2ac0 1231intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1232 struct intel_crtc_state *pipe_config)
a4fc5ed6 1233{
5bfe2ac0 1234 struct drm_device *dev = encoder->base.dev;
36008365 1235 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1236 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1237 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1238 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1239 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1240 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1241 int lane_count, clock;
56071a20 1242 int min_lane_count = 1;
eeb6324d 1243 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1244 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1245 int min_clock = 0;
a8f3ef61 1246 int max_clock;
083f9560 1247 int bpp, mode_rate;
ff9a6750 1248 int link_avail, link_clock;
12f6a2e2 1249 const int *sink_rates;
f4896f15 1250 int supported_rates[8] = {0};
636280ba 1251 const int *source_rates;
a8f3ef61
SJ
1252 int source_len, sink_len, supported_len;
1253
12f6a2e2 1254 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
a8f3ef61 1255
1db10e28 1256 source_len = intel_dp_source_rates(dev, &source_rates);
a8f3ef61
SJ
1257
1258 supported_len = intel_supported_rates(source_rates, source_len,
1259 sink_rates, sink_len, supported_rates);
1260
1261 /* No common link rates between source and sink */
1262 WARN_ON(supported_len <= 0);
1263
1264 max_clock = supported_len - 1;
a4fc5ed6 1265
bc7d38a4 1266 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1267 pipe_config->has_pch_encoder = true;
1268
03afc4a2 1269 pipe_config->has_dp_encoder = true;
f769cd24 1270 pipe_config->has_drrs = false;
9ed109a7 1271 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1272
dd06f90e
JN
1273 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1274 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1275 adjusted_mode);
2dd24552
JB
1276 if (!HAS_PCH_SPLIT(dev))
1277 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1278 intel_connector->panel.fitting_mode);
1279 else
b074cec8
JB
1280 intel_pch_panel_fitting(intel_crtc, pipe_config,
1281 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1282 }
1283
cb1793ce 1284 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1285 return false;
1286
083f9560 1287 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61
SJ
1288 "max bw %d pixel clock %iKHz\n",
1289 max_lane_count, supported_rates[max_clock],
241bfc38 1290 adjusted_mode->crtc_clock);
083f9560 1291
36008365
DV
1292 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1293 * bpc in between. */
3e7ca985 1294 bpp = pipe_config->pipe_bpp;
56071a20
JN
1295 if (is_edp(intel_dp)) {
1296 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1297 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1298 dev_priv->vbt.edp_bpp);
1299 bpp = dev_priv->vbt.edp_bpp;
1300 }
1301
344c5bbc
JN
1302 /*
1303 * Use the maximum clock and number of lanes the eDP panel
1304 * advertises being capable of. The panels are generally
1305 * designed to support only a single clock and lane
1306 * configuration, and typically these values correspond to the
1307 * native resolution of the panel.
1308 */
1309 min_lane_count = max_lane_count;
1310 min_clock = max_clock;
7984211e 1311 }
657445fe 1312
36008365 1313 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1314 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1315 bpp);
36008365 1316
c6930992 1317 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1318 for (lane_count = min_lane_count;
1319 lane_count <= max_lane_count;
1320 lane_count <<= 1) {
1321
1322 link_clock = supported_rates[clock];
36008365
DV
1323 link_avail = intel_dp_max_data_rate(link_clock,
1324 lane_count);
1325
1326 if (mode_rate <= link_avail) {
1327 goto found;
1328 }
1329 }
1330 }
1331 }
c4867936 1332
36008365 1333 return false;
3685a8f3 1334
36008365 1335found:
55bc60db
VS
1336 if (intel_dp->color_range_auto) {
1337 /*
1338 * See:
1339 * CEA-861-E - 5.1 Default Encoding Parameters
1340 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1341 */
18316c8c 1342 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1343 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1344 else
1345 intel_dp->color_range = 0;
1346 }
1347
3685a8f3 1348 if (intel_dp->color_range)
50f3b016 1349 pipe_config->limited_color_range = true;
a4fc5ed6 1350
36008365 1351 intel_dp->lane_count = lane_count;
a8f3ef61
SJ
1352
1353 intel_dp->link_bw =
1354 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1355
1356 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
1357 intel_dp->rate_select =
1358 rate_to_index(supported_rates[clock], sink_rates);
1359 intel_dp->link_bw = 0;
1360 }
1361
657445fe 1362 pipe_config->pipe_bpp = bpp;
a8f3ef61 1363 pipe_config->port_clock = supported_rates[clock];
a4fc5ed6 1364
36008365
DV
1365 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1366 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1367 pipe_config->port_clock, bpp);
36008365
DV
1368 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1369 mode_rate, link_avail);
a4fc5ed6 1370
03afc4a2 1371 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1372 adjusted_mode->crtc_clock,
1373 pipe_config->port_clock,
03afc4a2 1374 &pipe_config->dp_m_n);
9d1a455b 1375
439d7ac0 1376 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1377 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1378 pipe_config->has_drrs = true;
439d7ac0
PB
1379 intel_link_compute_m_n(bpp, lane_count,
1380 intel_connector->panel.downclock_mode->clock,
1381 pipe_config->port_clock,
1382 &pipe_config->dp_m2_n2);
1383 }
1384
5416d871 1385 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
c3346ef6 1386 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
5416d871 1387 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1388 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1389 else
1390 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1391
03afc4a2 1392 return true;
a4fc5ed6
KP
1393}
1394
7c62a164 1395static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1396{
7c62a164
DV
1397 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1398 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1399 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1400 struct drm_i915_private *dev_priv = dev->dev_private;
1401 u32 dpa_ctl;
1402
6e3c9717
ACO
1403 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1404 crtc->config->port_clock);
ea9b6006
DV
1405 dpa_ctl = I915_READ(DP_A);
1406 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1407
6e3c9717 1408 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1409 /* For a long time we've carried around an ILK-DevA w/a for the
1410 * 160MHz clock. If we're really unlucky, it's still required.
1411 */
1412 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1413 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1414 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1415 } else {
1416 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1417 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1418 }
1ce17038 1419
ea9b6006
DV
1420 I915_WRITE(DP_A, dpa_ctl);
1421
1422 POSTING_READ(DP_A);
1423 udelay(500);
1424}
1425
8ac33ed3 1426static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1427{
b934223d 1428 struct drm_device *dev = encoder->base.dev;
417e822d 1429 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1430 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1431 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1432 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1433 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1434
417e822d 1435 /*
1a2eb460 1436 * There are four kinds of DP registers:
417e822d
KP
1437 *
1438 * IBX PCH
1a2eb460
KP
1439 * SNB CPU
1440 * IVB CPU
417e822d
KP
1441 * CPT PCH
1442 *
1443 * IBX PCH and CPU are the same for almost everything,
1444 * except that the CPU DP PLL is configured in this
1445 * register
1446 *
1447 * CPT PCH is quite different, having many bits moved
1448 * to the TRANS_DP_CTL register instead. That
1449 * configuration happens (oddly) in ironlake_pch_enable
1450 */
9c9e7927 1451
417e822d
KP
1452 /* Preserve the BIOS-computed detected bit. This is
1453 * supposed to be read-only.
1454 */
1455 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1456
417e822d 1457 /* Handle DP bits in common between all three register formats */
417e822d 1458 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1459 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1460
6e3c9717 1461 if (crtc->config->has_audio)
ea5b213a 1462 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1463
417e822d 1464 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1465
bc7d38a4 1466 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1467 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1468 intel_dp->DP |= DP_SYNC_HS_HIGH;
1469 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1470 intel_dp->DP |= DP_SYNC_VS_HIGH;
1471 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1472
6aba5b6c 1473 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1474 intel_dp->DP |= DP_ENHANCED_FRAMING;
1475
7c62a164 1476 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1477 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1478 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1479 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1480
1481 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1482 intel_dp->DP |= DP_SYNC_HS_HIGH;
1483 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1484 intel_dp->DP |= DP_SYNC_VS_HIGH;
1485 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1486
6aba5b6c 1487 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1488 intel_dp->DP |= DP_ENHANCED_FRAMING;
1489
44f37d1f
CML
1490 if (!IS_CHERRYVIEW(dev)) {
1491 if (crtc->pipe == 1)
1492 intel_dp->DP |= DP_PIPEB_SELECT;
1493 } else {
1494 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1495 }
417e822d
KP
1496 } else {
1497 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1498 }
a4fc5ed6
KP
1499}
1500
ffd6749d
PZ
1501#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1502#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1503
1a5ef5b7
PZ
1504#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1505#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1506
ffd6749d
PZ
1507#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1508#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1509
4be73780 1510static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1511 u32 mask,
1512 u32 value)
bd943159 1513{
30add22d 1514 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1515 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1516 u32 pp_stat_reg, pp_ctrl_reg;
1517
e39b999a
VS
1518 lockdep_assert_held(&dev_priv->pps_mutex);
1519
bf13e81b
JN
1520 pp_stat_reg = _pp_stat_reg(intel_dp);
1521 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1522
99ea7127 1523 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1524 mask, value,
1525 I915_READ(pp_stat_reg),
1526 I915_READ(pp_ctrl_reg));
32ce697c 1527
453c5420 1528 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1529 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1530 I915_READ(pp_stat_reg),
1531 I915_READ(pp_ctrl_reg));
32ce697c 1532 }
54c136d4
CW
1533
1534 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1535}
32ce697c 1536
4be73780 1537static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1538{
1539 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1540 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1541}
1542
4be73780 1543static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1544{
1545 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1546 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1547}
1548
4be73780 1549static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1550{
1551 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1552
1553 /* When we disable the VDD override bit last we have to do the manual
1554 * wait. */
1555 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1556 intel_dp->panel_power_cycle_delay);
1557
4be73780 1558 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1559}
1560
4be73780 1561static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1562{
1563 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1564 intel_dp->backlight_on_delay);
1565}
1566
4be73780 1567static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1568{
1569 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1570 intel_dp->backlight_off_delay);
1571}
99ea7127 1572
832dd3c1
KP
1573/* Read the current pp_control value, unlocking the register if it
1574 * is locked
1575 */
1576
453c5420 1577static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1578{
453c5420
JB
1579 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1580 struct drm_i915_private *dev_priv = dev->dev_private;
1581 u32 control;
832dd3c1 1582
e39b999a
VS
1583 lockdep_assert_held(&dev_priv->pps_mutex);
1584
bf13e81b 1585 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1586 control &= ~PANEL_UNLOCK_MASK;
1587 control |= PANEL_UNLOCK_REGS;
1588 return control;
bd943159
KP
1589}
1590
951468f3
VS
1591/*
1592 * Must be paired with edp_panel_vdd_off().
1593 * Must hold pps_mutex around the whole on/off sequence.
1594 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1595 */
1e0560e0 1596static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1597{
30add22d 1598 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1600 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1601 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1602 enum intel_display_power_domain power_domain;
5d613501 1603 u32 pp;
453c5420 1604 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1605 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1606
e39b999a
VS
1607 lockdep_assert_held(&dev_priv->pps_mutex);
1608
97af61f5 1609 if (!is_edp(intel_dp))
adddaaf4 1610 return false;
bd943159 1611
2c623c11 1612 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1613 intel_dp->want_panel_vdd = true;
99ea7127 1614
4be73780 1615 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1616 return need_to_disable;
b0665d57 1617
4e6e1a54
ID
1618 power_domain = intel_display_port_power_domain(intel_encoder);
1619 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1620
3936fcf4
VS
1621 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1622 port_name(intel_dig_port->port));
bd943159 1623
4be73780
DV
1624 if (!edp_have_panel_power(intel_dp))
1625 wait_panel_power_cycle(intel_dp);
99ea7127 1626
453c5420 1627 pp = ironlake_get_pp_control(intel_dp);
5d613501 1628 pp |= EDP_FORCE_VDD;
ebf33b18 1629
bf13e81b
JN
1630 pp_stat_reg = _pp_stat_reg(intel_dp);
1631 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1632
1633 I915_WRITE(pp_ctrl_reg, pp);
1634 POSTING_READ(pp_ctrl_reg);
1635 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1636 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1637 /*
1638 * If the panel wasn't on, delay before accessing aux channel
1639 */
4be73780 1640 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1641 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1642 port_name(intel_dig_port->port));
f01eca2e 1643 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1644 }
adddaaf4
JN
1645
1646 return need_to_disable;
1647}
1648
951468f3
VS
1649/*
1650 * Must be paired with intel_edp_panel_vdd_off() or
1651 * intel_edp_panel_off().
1652 * Nested calls to these functions are not allowed since
1653 * we drop the lock. Caller must use some higher level
1654 * locking to prevent nested calls from other threads.
1655 */
b80d6c78 1656void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1657{
c695b6b6 1658 bool vdd;
adddaaf4 1659
c695b6b6
VS
1660 if (!is_edp(intel_dp))
1661 return;
1662
773538e8 1663 pps_lock(intel_dp);
c695b6b6 1664 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1665 pps_unlock(intel_dp);
c695b6b6 1666
e2c719b7 1667 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1668 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1669}
1670
4be73780 1671static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1672{
30add22d 1673 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1674 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1675 struct intel_digital_port *intel_dig_port =
1676 dp_to_dig_port(intel_dp);
1677 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1678 enum intel_display_power_domain power_domain;
5d613501 1679 u32 pp;
453c5420 1680 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1681
e39b999a 1682 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1683
15e899a0 1684 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1685
15e899a0 1686 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1687 return;
b0665d57 1688
3936fcf4
VS
1689 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1690 port_name(intel_dig_port->port));
bd943159 1691
be2c9196
VS
1692 pp = ironlake_get_pp_control(intel_dp);
1693 pp &= ~EDP_FORCE_VDD;
453c5420 1694
be2c9196
VS
1695 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1696 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1697
be2c9196
VS
1698 I915_WRITE(pp_ctrl_reg, pp);
1699 POSTING_READ(pp_ctrl_reg);
90791a5c 1700
be2c9196
VS
1701 /* Make sure sequencer is idle before allowing subsequent activity */
1702 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1703 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1704
be2c9196
VS
1705 if ((pp & POWER_TARGET_ON) == 0)
1706 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1707
be2c9196
VS
1708 power_domain = intel_display_port_power_domain(intel_encoder);
1709 intel_display_power_put(dev_priv, power_domain);
bd943159 1710}
5d613501 1711
4be73780 1712static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1713{
1714 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1715 struct intel_dp, panel_vdd_work);
bd943159 1716
773538e8 1717 pps_lock(intel_dp);
15e899a0
VS
1718 if (!intel_dp->want_panel_vdd)
1719 edp_panel_vdd_off_sync(intel_dp);
773538e8 1720 pps_unlock(intel_dp);
bd943159
KP
1721}
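/*
 * edp_panel_vdd_work() above runs from the delayed work queued by
 * edp_panel_vdd_schedule_off() below; it re-checks want_panel_vdd under
 * pps_mutex, so a racing edp_panel_vdd_on() simply keeps VDD enabled and
 * the deferred off becomes a no-op.
 */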
1722
aba86890
ID
1723static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1724{
1725 unsigned long delay;
1726
1727 /*
1728 * Queue the timer to fire a long time from now (relative to the power
1729 * down delay) to keep the panel power up across a sequence of
1730 * operations.
1731 */
1732 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1733 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1734}
1735
951468f3
VS
1736/*
1737 * Must be paired with edp_panel_vdd_on().
1738 * Must hold pps_mutex around the whole on/off sequence.
1739 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1740 */
4be73780 1741static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1742{
e39b999a
VS
1743 struct drm_i915_private *dev_priv =
1744 intel_dp_to_dev(intel_dp)->dev_private;
1745
1746 lockdep_assert_held(&dev_priv->pps_mutex);
1747
97af61f5
KP
1748 if (!is_edp(intel_dp))
1749 return;
5d613501 1750
e2c719b7 1751 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1752 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1753
bd943159
KP
1754 intel_dp->want_panel_vdd = false;
1755
aba86890 1756 if (sync)
4be73780 1757 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1758 else
1759 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1760}
1761
9f0fb5be 1762static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1763{
30add22d 1764 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1765 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1766 u32 pp;
453c5420 1767 u32 pp_ctrl_reg;
9934c132 1768
9f0fb5be
VS
1769 lockdep_assert_held(&dev_priv->pps_mutex);
1770
97af61f5 1771 if (!is_edp(intel_dp))
bd943159 1772 return;
99ea7127 1773
3936fcf4
VS
1774 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1775 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1776
e7a89ace
VS
1777 if (WARN(edp_have_panel_power(intel_dp),
1778 "eDP port %c panel power already on\n",
1779 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1780 return;
9934c132 1781
4be73780 1782 wait_panel_power_cycle(intel_dp);
37c6c9b0 1783
bf13e81b 1784 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1785 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1786 if (IS_GEN5(dev)) {
1787 /* ILK workaround: disable reset around power sequence */
1788 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1789 I915_WRITE(pp_ctrl_reg, pp);
1790 POSTING_READ(pp_ctrl_reg);
05ce1a49 1791 }
37c6c9b0 1792
1c0ae80a 1793 pp |= POWER_TARGET_ON;
99ea7127
KP
1794 if (!IS_GEN5(dev))
1795 pp |= PANEL_POWER_RESET;
1796
453c5420
JB
1797 I915_WRITE(pp_ctrl_reg, pp);
1798 POSTING_READ(pp_ctrl_reg);
9934c132 1799
4be73780 1800 wait_panel_on(intel_dp);
dce56b3c 1801 intel_dp->last_power_on = jiffies;
9934c132 1802
05ce1a49
KP
1803 if (IS_GEN5(dev)) {
1804 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1805 I915_WRITE(pp_ctrl_reg, pp);
1806 POSTING_READ(pp_ctrl_reg);
05ce1a49 1807 }
9f0fb5be 1808}
e39b999a 1809
9f0fb5be
VS
1810void intel_edp_panel_on(struct intel_dp *intel_dp)
1811{
1812 if (!is_edp(intel_dp))
1813 return;
1814
1815 pps_lock(intel_dp);
1816 edp_panel_on(intel_dp);
773538e8 1817 pps_unlock(intel_dp);
9934c132
JB
1818}
1819
9f0fb5be
VS
1820
1821static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1822{
4e6e1a54
ID
1823 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1824 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1825 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1826 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1827 enum intel_display_power_domain power_domain;
99ea7127 1828 u32 pp;
453c5420 1829 u32 pp_ctrl_reg;
9934c132 1830
9f0fb5be
VS
1831 lockdep_assert_held(&dev_priv->pps_mutex);
1832
97af61f5
KP
1833 if (!is_edp(intel_dp))
1834 return;
37c6c9b0 1835
3936fcf4
VS
1836 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1837 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1838
3936fcf4
VS
1839 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1840 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1841
453c5420 1842 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1843 /* We need to switch off panel power _and_ force vdd, because otherwise some
1844 * panels get very unhappy and cease to work. */
b3064154
PJ
1845 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1846 EDP_BLC_ENABLE);
453c5420 1847
bf13e81b 1848 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1849
849e39f5
PZ
1850 intel_dp->want_panel_vdd = false;
1851
453c5420
JB
1852 I915_WRITE(pp_ctrl_reg, pp);
1853 POSTING_READ(pp_ctrl_reg);
9934c132 1854
dce56b3c 1855 intel_dp->last_power_cycle = jiffies;
4be73780 1856 wait_panel_off(intel_dp);
849e39f5
PZ
1857
1858 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1859 power_domain = intel_display_port_power_domain(intel_encoder);
1860 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1861}
e39b999a 1862
9f0fb5be
VS
1863void intel_edp_panel_off(struct intel_dp *intel_dp)
1864{
1865 if (!is_edp(intel_dp))
1866 return;
e39b999a 1867
9f0fb5be
VS
1868 pps_lock(intel_dp);
1869 edp_panel_off(intel_dp);
773538e8 1870 pps_unlock(intel_dp);
9934c132
JB
1871}
1872
1250d107
JN
1873/* Enable backlight in the panel power control. */
1874static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1875{
da63a9f2
PZ
1876 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1877 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1878 struct drm_i915_private *dev_priv = dev->dev_private;
1879 u32 pp;
453c5420 1880 u32 pp_ctrl_reg;
32f9d658 1881
01cb9ea6
JB
1882 /*
1883 * If we enable the backlight right away following a panel power
1884 * on, we may see slight flicker as the panel syncs with the eDP
1885 * link. So delay a bit to make sure the image is solid before
1886 * allowing it to appear.
1887 */
4be73780 1888 wait_backlight_on(intel_dp);
e39b999a 1889
773538e8 1890 pps_lock(intel_dp);
e39b999a 1891
453c5420 1892 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1893 pp |= EDP_BLC_ENABLE;
453c5420 1894
bf13e81b 1895 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1896
1897 I915_WRITE(pp_ctrl_reg, pp);
1898 POSTING_READ(pp_ctrl_reg);
e39b999a 1899
773538e8 1900 pps_unlock(intel_dp);
32f9d658
ZW
1901}
1902
1250d107
JN
1903/* Enable backlight PWM and backlight PP control. */
1904void intel_edp_backlight_on(struct intel_dp *intel_dp)
1905{
1906 if (!is_edp(intel_dp))
1907 return;
1908
1909 DRM_DEBUG_KMS("\n");
1910
1911 intel_panel_enable_backlight(intel_dp->attached_connector);
1912 _intel_edp_backlight_on(intel_dp);
1913}
1914
1915/* Disable backlight in the panel power control. */
1916static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1917{
30add22d 1918 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1919 struct drm_i915_private *dev_priv = dev->dev_private;
1920 u32 pp;
453c5420 1921 u32 pp_ctrl_reg;
32f9d658 1922
f01eca2e
KP
1923 if (!is_edp(intel_dp))
1924 return;
1925
773538e8 1926 pps_lock(intel_dp);
e39b999a 1927
453c5420 1928 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1929 pp &= ~EDP_BLC_ENABLE;
453c5420 1930
bf13e81b 1931 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1932
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
f7d2323c 1935
773538e8 1936 pps_unlock(intel_dp);
e39b999a
VS
1937
1938 intel_dp->last_backlight_off = jiffies;
f7d2323c 1939 edp_wait_backlight_off(intel_dp);
1250d107 1940}
f7d2323c 1941
1250d107
JN
1942/* Disable backlight PP control and backlight PWM. */
1943void intel_edp_backlight_off(struct intel_dp *intel_dp)
1944{
1945 if (!is_edp(intel_dp))
1946 return;
1947
1948 DRM_DEBUG_KMS("\n");
f7d2323c 1949
1250d107 1950 _intel_edp_backlight_off(intel_dp);
f7d2323c 1951 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1952}
a4fc5ed6 1953
73580fb7
JN
1954/*
1955 * Hook for controlling the panel power control backlight through the bl_power
1956 * sysfs attribute. Take care to handle multiple calls.
1957 */
1958static void intel_edp_backlight_power(struct intel_connector *connector,
1959 bool enable)
1960{
1961 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1962 bool is_enabled;
1963
773538e8 1964 pps_lock(intel_dp);
e39b999a 1965 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1966 pps_unlock(intel_dp);
73580fb7
JN
1967
1968 if (is_enabled == enable)
1969 return;
1970
23ba9373
JN
1971 DRM_DEBUG_KMS("panel power control backlight %s\n",
1972 enable ? "enable" : "disable");
73580fb7
JN
1973
1974 if (enable)
1975 _intel_edp_backlight_on(intel_dp);
1976 else
1977 _intel_edp_backlight_off(intel_dp);
1978}
1979
2bd2ad64 1980static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 1981{
da63a9f2
PZ
1982 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1983 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1984 struct drm_device *dev = crtc->dev;
d240f20f
JB
1985 struct drm_i915_private *dev_priv = dev->dev_private;
1986 u32 dpa_ctl;
1987
2bd2ad64
DV
1988 assert_pipe_disabled(dev_priv,
1989 to_intel_crtc(crtc)->pipe);
1990
d240f20f
JB
1991 DRM_DEBUG_KMS("\n");
1992 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1993 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1994 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1995
1996 /* We don't adjust intel_dp->DP while tearing down the link, to
1997 * facilitate link retraining (e.g. after hotplug). Hence clear all
1998 * enable bits here to ensure that we don't enable too much. */
1999 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2000 intel_dp->DP |= DP_PLL_ENABLE;
2001 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2002 POSTING_READ(DP_A);
2003 udelay(200);
d240f20f
JB
2004}
2005
2bd2ad64 2006static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2007{
da63a9f2
PZ
2008 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2009 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2010 struct drm_device *dev = crtc->dev;
d240f20f
JB
2011 struct drm_i915_private *dev_priv = dev->dev_private;
2012 u32 dpa_ctl;
2013
2bd2ad64
DV
2014 assert_pipe_disabled(dev_priv,
2015 to_intel_crtc(crtc)->pipe);
2016
d240f20f 2017 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2018 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2019 "dp pll off, should be on\n");
2020 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2021
2022 /* We can't rely on the value tracked for the DP register in
2023 * intel_dp->DP because link_down must not change that (otherwise link
2024 * re-training will fail). */
298b0b39 2025 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2026 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2027 POSTING_READ(DP_A);
d240f20f
JB
2028 udelay(200);
2029}
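/*
 * ironlake_edp_pll_on/off() above toggle the dedicated CPU eDP (port A)
 * PLL through DP_A. The pipe is asserted disabled first, and the 200us
 * delay after each write presumably gives the PLL time to lock / spin
 * down (the exact settle time isn't documented here).
 */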
2030
c7ad3810 2031/* If the sink supports it, try to set the power state appropriately */
c19b0669 2032void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2033{
2034 int ret, i;
2035
2036 /* Should have a valid DPCD by this point */
2037 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2038 return;
2039
2040 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2041 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2042 DP_SET_POWER_D3);
c7ad3810
JB
2043 } else {
2044 /*
2045 * When turning on, we need to retry for 1ms to give the sink
2046 * time to wake up.
2047 */
2048 for (i = 0; i < 3; i++) {
9d1a1031
JN
2049 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2050 DP_SET_POWER_D0);
c7ad3810
JB
2051 if (ret == 1)
2052 break;
2053 msleep(1);
2054 }
2055 }
f9cac721
JN
2056
2057 if (ret != 1)
2058 DRM_DEBUG_KMS("failed to %s sink power state\n",
2059 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2060}
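/*
 * intel_dp_sink_dpms() above uses the DPCD DP_SET_POWER register to move
 * the sink between D0 and D3. DPCD 1.0 devices (rev < 0x11) don't
 * implement it, hence the early return, and a sink waking from D3 may
 * take up to 1ms before it acks the write, hence the retry loop.
 */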
2061
19d8fe15
DV
2062static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2063 enum pipe *pipe)
d240f20f 2064{
19d8fe15 2065 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2066 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2067 struct drm_device *dev = encoder->base.dev;
2068 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2069 enum intel_display_power_domain power_domain;
2070 u32 tmp;
2071
2072 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2073 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2074 return false;
2075
2076 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2077
2078 if (!(tmp & DP_PORT_EN))
2079 return false;
2080
bc7d38a4 2081 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2082 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2083 } else if (IS_CHERRYVIEW(dev)) {
2084 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2085 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2086 *pipe = PORT_TO_PIPE(tmp);
2087 } else {
2088 u32 trans_sel;
2089 u32 trans_dp;
2090 int i;
2091
2092 switch (intel_dp->output_reg) {
2093 case PCH_DP_B:
2094 trans_sel = TRANS_DP_PORT_SEL_B;
2095 break;
2096 case PCH_DP_C:
2097 trans_sel = TRANS_DP_PORT_SEL_C;
2098 break;
2099 case PCH_DP_D:
2100 trans_sel = TRANS_DP_PORT_SEL_D;
2101 break;
2102 default:
2103 return true;
2104 }
2105
055e393f 2106 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2107 trans_dp = I915_READ(TRANS_DP_CTL(i));
2108 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2109 *pipe = i;
2110 return true;
2111 }
2112 }
19d8fe15 2113
4a0833ec
DV
2114 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2115 intel_dp->output_reg);
2116 }
d240f20f 2117
19d8fe15
DV
2118 return true;
2119}
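/*
 * Note on intel_dp_get_hw_state() above: on CPT/PPT PCH the DP port
 * register carries no pipe select; the routing lives in the per-pipe
 * TRANS_DP_CTL port select field, which is why we have to scan all pipes
 * to find the one driving this port.
 */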
d240f20f 2120
045ac3b5 2121static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2122 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2123{
2124 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2125 u32 tmp, flags = 0;
63000ef6
XZ
2126 struct drm_device *dev = encoder->base.dev;
2127 struct drm_i915_private *dev_priv = dev->dev_private;
2128 enum port port = dp_to_dig_port(intel_dp)->port;
2129 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2130 int dotclock;
045ac3b5 2131
9ed109a7
DV
2132 tmp = I915_READ(intel_dp->output_reg);
2133 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2134 pipe_config->has_audio = true;
2135
63000ef6 2136 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2137 if (tmp & DP_SYNC_HS_HIGH)
2138 flags |= DRM_MODE_FLAG_PHSYNC;
2139 else
2140 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2141
63000ef6
XZ
2142 if (tmp & DP_SYNC_VS_HIGH)
2143 flags |= DRM_MODE_FLAG_PVSYNC;
2144 else
2145 flags |= DRM_MODE_FLAG_NVSYNC;
2146 } else {
2147 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2148 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2149 flags |= DRM_MODE_FLAG_PHSYNC;
2150 else
2151 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2152
63000ef6
XZ
2153 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2154 flags |= DRM_MODE_FLAG_PVSYNC;
2155 else
2156 flags |= DRM_MODE_FLAG_NVSYNC;
2157 }
045ac3b5 2158
2d112de7 2159 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2160
8c875fca
VS
2161 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2162 tmp & DP_COLOR_RANGE_16_235)
2163 pipe_config->limited_color_range = true;
2164
eb14cb74
VS
2165 pipe_config->has_dp_encoder = true;
2166
2167 intel_dp_get_m_n(crtc, pipe_config);
2168
18442d08 2169 if (port == PORT_A) {
f1f644dc
JB
2170 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2171 pipe_config->port_clock = 162000;
2172 else
2173 pipe_config->port_clock = 270000;
2174 }
18442d08
VS
2175
2176 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2177 &pipe_config->dp_m_n);
2178
2179 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2180 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2181
2d112de7 2182 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2183
c6cd2ee2
JN
2184 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2185 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2186 /*
2187 * This is a big fat ugly hack.
2188 *
2189 * Some machines in UEFI boot mode provide us a VBT that has 18
2190 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2191 * unknown we fail to light up. Yet the same BIOS boots up with
2192 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2193 * max, not what it tells us to use.
2194 *
2195 * Note: This will still be broken if the eDP panel is not lit
2196 * up by the BIOS, and thus we can't get the mode at module
2197 * load.
2198 */
2199 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2200 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2201 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2202 }
045ac3b5
JB
2203}
2204
e8cb4558 2205static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2206{
e8cb4558 2207 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2208 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2209 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2210
6e3c9717 2211 if (crtc->config->has_audio)
495a5bb8 2212 intel_audio_codec_disable(encoder);
6cb49835 2213
b32c6f48
RV
2214 if (HAS_PSR(dev) && !HAS_DDI(dev))
2215 intel_psr_disable(intel_dp);
2216
6cb49835
DV
2217 /* Make sure the panel is off before trying to change the mode. But also
2218 * ensure that we have vdd while we switch off the panel. */
24f3e092 2219 intel_edp_panel_vdd_on(intel_dp);
4be73780 2220 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2221 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2222 intel_edp_panel_off(intel_dp);
3739850b 2223
08aff3fe
VS
2224 /* disable the port before the pipe on g4x */
2225 if (INTEL_INFO(dev)->gen < 5)
3739850b 2226 intel_dp_link_down(intel_dp);
d240f20f
JB
2227}
2228
08aff3fe 2229static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2230{
2bd2ad64 2231 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2232 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2233
49277c31 2234 intel_dp_link_down(intel_dp);
08aff3fe
VS
2235 if (port == PORT_A)
2236 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2237}
2238
2239static void vlv_post_disable_dp(struct intel_encoder *encoder)
2240{
2241 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2242
2243 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2244}
2245
580d3811
VS
2246static void chv_post_disable_dp(struct intel_encoder *encoder)
2247{
2248 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2249 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2250 struct drm_device *dev = encoder->base.dev;
2251 struct drm_i915_private *dev_priv = dev->dev_private;
2252 struct intel_crtc *intel_crtc =
2253 to_intel_crtc(encoder->base.crtc);
2254 enum dpio_channel ch = vlv_dport_to_channel(dport);
2255 enum pipe pipe = intel_crtc->pipe;
2256 u32 val;
2257
2258 intel_dp_link_down(intel_dp);
2259
2260 mutex_lock(&dev_priv->dpio_lock);
2261
2262 /* Propagate soft reset to data lane reset */
97fd4d5c 2263 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2264 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2265 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2266
97fd4d5c
VS
2267 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2268 val |= CHV_PCS_REQ_SOFTRESET_EN;
2269 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2270
2271 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2272 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2273 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2274
2275 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2276 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2277 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2278
2279 mutex_unlock(&dev_priv->dpio_lock);
2280}
2281
7b13b58a
VS
2282static void
2283_intel_dp_set_link_train(struct intel_dp *intel_dp,
2284 uint32_t *DP,
2285 uint8_t dp_train_pat)
2286{
2287 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2288 struct drm_device *dev = intel_dig_port->base.base.dev;
2289 struct drm_i915_private *dev_priv = dev->dev_private;
2290 enum port port = intel_dig_port->port;
2291
2292 if (HAS_DDI(dev)) {
2293 uint32_t temp = I915_READ(DP_TP_CTL(port));
2294
2295 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2296 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2297 else
2298 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2299
2300 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2301 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2302 case DP_TRAINING_PATTERN_DISABLE:
2303 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2304
2305 break;
2306 case DP_TRAINING_PATTERN_1:
2307 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2308 break;
2309 case DP_TRAINING_PATTERN_2:
2310 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2311 break;
2312 case DP_TRAINING_PATTERN_3:
2313 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2314 break;
2315 }
2316 I915_WRITE(DP_TP_CTL(port), temp);
2317
2318 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2319 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2320
2321 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2322 case DP_TRAINING_PATTERN_DISABLE:
2323 *DP |= DP_LINK_TRAIN_OFF_CPT;
2324 break;
2325 case DP_TRAINING_PATTERN_1:
2326 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2327 break;
2328 case DP_TRAINING_PATTERN_2:
2329 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2330 break;
2331 case DP_TRAINING_PATTERN_3:
2332 DRM_ERROR("DP training pattern 3 not supported\n");
2333 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2334 break;
2335 }
2336
2337 } else {
2338 if (IS_CHERRYVIEW(dev))
2339 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2340 else
2341 *DP &= ~DP_LINK_TRAIN_MASK;
2342
2343 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2344 case DP_TRAINING_PATTERN_DISABLE:
2345 *DP |= DP_LINK_TRAIN_OFF;
2346 break;
2347 case DP_TRAINING_PATTERN_1:
2348 *DP |= DP_LINK_TRAIN_PAT_1;
2349 break;
2350 case DP_TRAINING_PATTERN_2:
2351 *DP |= DP_LINK_TRAIN_PAT_2;
2352 break;
2353 case DP_TRAINING_PATTERN_3:
2354 if (IS_CHERRYVIEW(dev)) {
2355 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2356 } else {
2357 DRM_ERROR("DP training pattern 3 not supported\n");
2358 *DP |= DP_LINK_TRAIN_PAT_2;
2359 }
2360 break;
2361 }
2362 }
2363}
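/*
 * _intel_dp_set_link_train() above only programs the training pattern
 * bits: DDI platforms use DP_TP_CTL, CPT-style PCH ports use the CPT
 * variants of the link train field, and everything else uses the legacy
 * bits in the port register. Training pattern 3 (needed for HBR2) is
 * only representable on DDI and CHV here.
 */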
2364
2365static void intel_dp_enable_port(struct intel_dp *intel_dp)
2366{
2367 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2368 struct drm_i915_private *dev_priv = dev->dev_private;
2369
7b13b58a
VS
2370 /* enable with pattern 1 (as per spec) */
2371 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2372 DP_TRAINING_PATTERN_1);
2373
2374 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2375 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2376
2377 /*
2378 * Magic for VLV/CHV. We _must_ first set up the register
2379 * without actually enabling the port, and then do another
2380 * write to enable the port. Otherwise link training will
2381 * fail when the power sequencer is freshly used for this port.
2382 */
2383 intel_dp->DP |= DP_PORT_EN;
2384
2385 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2386 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2387}
2388
e8cb4558 2389static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2390{
e8cb4558
DV
2391 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2392 struct drm_device *dev = encoder->base.dev;
2393 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2394 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2395 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2396
0c33d8d7
DV
2397 if (WARN_ON(dp_reg & DP_PORT_EN))
2398 return;
5d613501 2399
093e3f13
VS
2400 pps_lock(intel_dp);
2401
2402 if (IS_VALLEYVIEW(dev))
2403 vlv_init_panel_power_sequencer(intel_dp);
2404
7b13b58a 2405 intel_dp_enable_port(intel_dp);
093e3f13
VS
2406
2407 edp_panel_vdd_on(intel_dp);
2408 edp_panel_on(intel_dp);
2409 edp_panel_vdd_off(intel_dp, true);
2410
2411 pps_unlock(intel_dp);
2412
61234fa5
VS
2413 if (IS_VALLEYVIEW(dev))
2414 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2415
f01eca2e 2416 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2417 intel_dp_start_link_train(intel_dp);
33a34e4e 2418 intel_dp_complete_link_train(intel_dp);
3ab9c637 2419 intel_dp_stop_link_train(intel_dp);
c1dec79a 2420
6e3c9717 2421 if (crtc->config->has_audio) {
c1dec79a
JN
2422 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2423 pipe_name(crtc->pipe));
2424 intel_audio_codec_enable(encoder);
2425 }
ab1f90f9 2426}
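/*
 * Rough ordering in intel_enable_dp() above: program the port with
 * training pattern 1, run the eDP power-on sequence (VDD on, panel on,
 * VDD off) under the pps lock, then link train, and finally enable audio
 * if the crtc config asks for it.
 */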
89b667f8 2427
ecff4f3b
JN
2428static void g4x_enable_dp(struct intel_encoder *encoder)
2429{
828f5c6e
JN
2430 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2431
ecff4f3b 2432 intel_enable_dp(encoder);
4be73780 2433 intel_edp_backlight_on(intel_dp);
ab1f90f9 2434}
89b667f8 2435
ab1f90f9
JN
2436static void vlv_enable_dp(struct intel_encoder *encoder)
2437{
828f5c6e
JN
2438 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2439
4be73780 2440 intel_edp_backlight_on(intel_dp);
b32c6f48 2441 intel_psr_enable(intel_dp);
d240f20f
JB
2442}
2443
ecff4f3b 2444static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2445{
2446 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2447 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2448
8ac33ed3
DV
2449 intel_dp_prepare(encoder);
2450
d41f1efb
DV
2451 /* Only ilk+ has port A */
2452 if (dport->port == PORT_A) {
2453 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2454 ironlake_edp_pll_on(intel_dp);
d41f1efb 2455 }
ab1f90f9
JN
2456}
2457
83b84597
VS
2458static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2459{
2460 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2461 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2462 enum pipe pipe = intel_dp->pps_pipe;
2463 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2464
2465 edp_panel_vdd_off_sync(intel_dp);
2466
2467 /*
2468 * VLV seems to get confused when multiple power seqeuencers
2469 * have the same port selected (even if only one has power/vdd
2470 * enabled). The failure manifests as vlv_wait_port_ready() failing
2471 * CHV on the other hand doesn't seem to mind having the same port
2472 * selected in multiple power seqeuencers, but let's clear the
2473 * port select always when logically disconnecting a power sequencer
2474 * from a port.
2475 */
2476 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2477 pipe_name(pipe), port_name(intel_dig_port->port));
2478 I915_WRITE(pp_on_reg, 0);
2479 POSTING_READ(pp_on_reg);
2480
2481 intel_dp->pps_pipe = INVALID_PIPE;
2482}
2483
a4a5d2f8
VS
2484static void vlv_steal_power_sequencer(struct drm_device *dev,
2485 enum pipe pipe)
2486{
2487 struct drm_i915_private *dev_priv = dev->dev_private;
2488 struct intel_encoder *encoder;
2489
2490 lockdep_assert_held(&dev_priv->pps_mutex);
2491
ac3c12e4
VS
2492 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2493 return;
2494
a4a5d2f8
VS
2495 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2496 base.head) {
2497 struct intel_dp *intel_dp;
773538e8 2498 enum port port;
a4a5d2f8
VS
2499
2500 if (encoder->type != INTEL_OUTPUT_EDP)
2501 continue;
2502
2503 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2504 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2505
2506 if (intel_dp->pps_pipe != pipe)
2507 continue;
2508
2509 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2510 pipe_name(pipe), port_name(port));
a4a5d2f8 2511
034e43c6
VS
2512 WARN(encoder->connectors_active,
2513 "stealing pipe %c power sequencer from active eDP port %c\n",
2514 pipe_name(pipe), port_name(port));
a4a5d2f8 2515
a4a5d2f8 2516 /* make sure vdd is off before we steal it */
83b84597 2517 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2518 }
2519}
2520
2521static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2522{
2523 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2524 struct intel_encoder *encoder = &intel_dig_port->base;
2525 struct drm_device *dev = encoder->base.dev;
2526 struct drm_i915_private *dev_priv = dev->dev_private;
2527 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2528
2529 lockdep_assert_held(&dev_priv->pps_mutex);
2530
093e3f13
VS
2531 if (!is_edp(intel_dp))
2532 return;
2533
a4a5d2f8
VS
2534 if (intel_dp->pps_pipe == crtc->pipe)
2535 return;
2536
2537 /*
2538 * If another power sequencer was being used on this
2539 * port previously make sure to turn off vdd there while
2540 * we still have control of it.
2541 */
2542 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2543 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2544
2545 /*
2546 * We may be stealing the power
2547 * sequencer from another port.
2548 */
2549 vlv_steal_power_sequencer(dev, crtc->pipe);
2550
2551 /* now it's all ours */
2552 intel_dp->pps_pipe = crtc->pipe;
2553
2554 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2555 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2556
2557 /* init power sequencer on this pipe and port */
36b5f425
VS
2558 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2559 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2560}
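/*
 * vlv_init_panel_power_sequencer() above claims the power sequencer of
 * the pipe we're about to use for this eDP port: any PPS previously
 * attached to the port is detached (with VDD forced off first), a PPS in
 * use by another port is stolen, and the sequencer delays/registers are
 * then re-initialized for this port.
 */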
2561
ab1f90f9 2562static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2563{
2bd2ad64 2564 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2565 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2566 struct drm_device *dev = encoder->base.dev;
89b667f8 2567 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2568 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2569 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2570 int pipe = intel_crtc->pipe;
2571 u32 val;
a4fc5ed6 2572
ab1f90f9 2573 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2574
ab3c759a 2575 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2576 val = 0;
2577 if (pipe)
2578 val |= (1<<21);
2579 else
2580 val &= ~(1<<21);
2581 val |= 0x001000c4;
ab3c759a
CML
2582 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2583 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2584 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2585
ab1f90f9
JN
2586 mutex_unlock(&dev_priv->dpio_lock);
2587
2588 intel_enable_dp(encoder);
89b667f8
JB
2589}
2590
ecff4f3b 2591static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2592{
2593 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2594 struct drm_device *dev = encoder->base.dev;
2595 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2596 struct intel_crtc *intel_crtc =
2597 to_intel_crtc(encoder->base.crtc);
e4607fcf 2598 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2599 int pipe = intel_crtc->pipe;
89b667f8 2600
8ac33ed3
DV
2601 intel_dp_prepare(encoder);
2602
89b667f8 2603 /* Program Tx lane resets to default */
0980a60f 2604 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2605 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2606 DPIO_PCS_TX_LANE2_RESET |
2607 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2608 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2609 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2610 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2611 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2612 DPIO_PCS_CLK_SOFT_RESET);
2613
2614 /* Fix up inter-pair skew failure */
ab3c759a
CML
2615 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2616 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2617 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2618 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2619}
2620
e4a1d846
CML
2621static void chv_pre_enable_dp(struct intel_encoder *encoder)
2622{
2623 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2624 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2625 struct drm_device *dev = encoder->base.dev;
2626 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2627 struct intel_crtc *intel_crtc =
2628 to_intel_crtc(encoder->base.crtc);
2629 enum dpio_channel ch = vlv_dport_to_channel(dport);
2630 int pipe = intel_crtc->pipe;
2631 int data, i;
949c1d43 2632 u32 val;
e4a1d846 2633
e4a1d846 2634 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2635
570e2a74
VS
2636 /* allow hardware to manage TX FIFO reset source */
2637 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2638 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2639 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2640
2641 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2642 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2643 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2644
949c1d43 2645 /* Deassert soft data lane reset */
97fd4d5c 2646 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2647 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2648 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2649
2650 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2651 val |= CHV_PCS_REQ_SOFTRESET_EN;
2652 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2653
2654 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2655 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2656 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2657
97fd4d5c 2658 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2659 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2660 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2661
2662 /* Program Tx lane latency optimal setting */
e4a1d846
CML
2663 for (i = 0; i < 4; i++) {
2664 /* Set the latency optimal bit */
2665 data = (i == 1) ? 0x0 : 0x6;
2666 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2667 data << DPIO_FRC_LATENCY_SHFIT);
2668
2669 /* Set the upar bit */
2670 data = (i == 1) ? 0x0 : 0x1;
2671 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2672 data << DPIO_UPAR_SHIFT);
2673 }
2674
2675 /* Data lane stagger programming */
2676 /* FIXME: Fix up value only after power analysis */
2677
2678 mutex_unlock(&dev_priv->dpio_lock);
2679
e4a1d846 2680 intel_enable_dp(encoder);
e4a1d846
CML
2681}
2682
9197c88b
VS
2683static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2684{
2685 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2686 struct drm_device *dev = encoder->base.dev;
2687 struct drm_i915_private *dev_priv = dev->dev_private;
2688 struct intel_crtc *intel_crtc =
2689 to_intel_crtc(encoder->base.crtc);
2690 enum dpio_channel ch = vlv_dport_to_channel(dport);
2691 enum pipe pipe = intel_crtc->pipe;
2692 u32 val;
2693
625695f8
VS
2694 intel_dp_prepare(encoder);
2695
9197c88b
VS
2696 mutex_lock(&dev_priv->dpio_lock);
2697
b9e5ac3c
VS
2698 /* program left/right clock distribution */
2699 if (pipe != PIPE_B) {
2700 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2701 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2702 if (ch == DPIO_CH0)
2703 val |= CHV_BUFLEFTENA1_FORCE;
2704 if (ch == DPIO_CH1)
2705 val |= CHV_BUFRIGHTENA1_FORCE;
2706 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2707 } else {
2708 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2709 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2710 if (ch == DPIO_CH0)
2711 val |= CHV_BUFLEFTENA2_FORCE;
2712 if (ch == DPIO_CH1)
2713 val |= CHV_BUFRIGHTENA2_FORCE;
2714 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2715 }
2716
9197c88b
VS
2717 /* program clock channel usage */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2719 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2720 if (pipe != PIPE_B)
2721 val &= ~CHV_PCS_USEDCLKCHANNEL;
2722 else
2723 val |= CHV_PCS_USEDCLKCHANNEL;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2725
2726 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2727 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2728 if (pipe != PIPE_B)
2729 val &= ~CHV_PCS_USEDCLKCHANNEL;
2730 else
2731 val |= CHV_PCS_USEDCLKCHANNEL;
2732 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2733
2734 /*
2735 * This is a bit weird since generally CL
2736 * matches the pipe, but here we need to
2737 * pick the CL based on the port.
2738 */
2739 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2740 if (pipe != PIPE_B)
2741 val &= ~CHV_CMN_USEDCLKCHANNEL;
2742 else
2743 val |= CHV_CMN_USEDCLKCHANNEL;
2744 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2745
2746 mutex_unlock(&dev_priv->dpio_lock);
2747}
2748
a4fc5ed6 2749/*
df0c237d
JB
2750 * Native read with retry for link status and receiver capability reads for
2751 * cases where the sink may still be asleep.
9d1a1031
JN
2752 *
2753 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2754 * supposed to retry 3 times per the spec.
a4fc5ed6 2755 */
9d1a1031
JN
2756static ssize_t
2757intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2758 void *buffer, size_t size)
a4fc5ed6 2759{
9d1a1031
JN
2760 ssize_t ret;
2761 int i;
61da5fab 2762
f6a19066
VS
2763 /*
2764 * Sometimes we just get the same incorrect byte repeated
2765 * over the entire buffer. Doing just one throw-away read
2766 * initially seems to "solve" it.
2767 */
2768 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2769
61da5fab 2770 for (i = 0; i < 3; i++) {
9d1a1031
JN
2771 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2772 if (ret == size)
2773 return ret;
61da5fab
JB
2774 msleep(1);
2775 }
a4fc5ed6 2776
9d1a1031 2777 return ret;
a4fc5ed6
KP
2778}
2779
2780/*
2781 * Fetch AUX CH registers 0x202 - 0x207 which contain
2782 * link status information
2783 */
2784static bool
93f62dad 2785intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2786{
9d1a1031
JN
2787 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2788 DP_LANE0_1_STATUS,
2789 link_status,
2790 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2791}
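/*
 * The DPCD registers 0x202-0x207 read above cover the per-lane
 * CR/EQ/symbol-lock status, the interlane align and sink status bytes,
 * and the per-lane voltage swing / pre-emphasis adjust requests; reading
 * them in one burst keeps the snapshot coherent.
 */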
2792
1100244e 2793/* These are source-specific values. */
a4fc5ed6 2794static uint8_t
1a2eb460 2795intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2796{
30add22d 2797 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2798 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2799 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2800
7ad14a29
SJ
2801 if (INTEL_INFO(dev)->gen >= 9) {
2802 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2803 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2804 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2805 } else if (IS_VALLEYVIEW(dev))
bd60018a 2806 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2807 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2808 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2809 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2810 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2811 else
bd60018a 2812 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2813}
2814
2815static uint8_t
2816intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2817{
30add22d 2818 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2819 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2820
5a9d1f1a
DL
2821 if (INTEL_INFO(dev)->gen >= 9) {
2822 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2823 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2824 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2825 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2826 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2827 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2828 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2829 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2830 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2831 default:
2832 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2833 }
2834 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2835 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2836 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2837 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2838 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2839 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2840 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2841 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2843 default:
bd60018a 2844 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2845 }
e2fa6fba
P
2846 } else if (IS_VALLEYVIEW(dev)) {
2847 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2848 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2849 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2851 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2853 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2855 default:
bd60018a 2856 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2857 }
bc7d38a4 2858 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2859 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2861 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2864 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2865 default:
bd60018a 2866 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2867 }
2868 } else {
2869 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2870 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2871 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2872 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2873 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2874 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2875 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2876 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2877 default:
bd60018a 2878 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2879 }
a4fc5ed6
KP
2880 }
2881}
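/*
 * The two helpers above report the *source* limits. During link training
 * the sink requests a voltage swing / pre-emphasis level per lane; those
 * requests are clamped to these maxima and flagged with
 * DP_TRAIN_MAX_SWING_REACHED / DP_TRAIN_MAX_PRE_EMPHASIS_REACHED so the
 * sink knows not to ask for more (see intel_get_adjust_train() below).
 */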
2882
e2fa6fba
P
2883static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2884{
2885 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2886 struct drm_i915_private *dev_priv = dev->dev_private;
2887 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2888 struct intel_crtc *intel_crtc =
2889 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2890 unsigned long demph_reg_value, preemph_reg_value,
2891 uniqtranscale_reg_value;
2892 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2893 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2894 int pipe = intel_crtc->pipe;
e2fa6fba
P
2895
2896 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2897 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2898 preemph_reg_value = 0x0004000;
2899 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2900 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2901 demph_reg_value = 0x2B405555;
2902 uniqtranscale_reg_value = 0x552AB83A;
2903 break;
bd60018a 2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2905 demph_reg_value = 0x2B404040;
2906 uniqtranscale_reg_value = 0x5548B83A;
2907 break;
bd60018a 2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2909 demph_reg_value = 0x2B245555;
2910 uniqtranscale_reg_value = 0x5560B83A;
2911 break;
bd60018a 2912 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2913 demph_reg_value = 0x2B405555;
2914 uniqtranscale_reg_value = 0x5598DA3A;
2915 break;
2916 default:
2917 return 0;
2918 }
2919 break;
bd60018a 2920 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2921 preemph_reg_value = 0x0002000;
2922 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2924 demph_reg_value = 0x2B404040;
2925 uniqtranscale_reg_value = 0x5552B83A;
2926 break;
bd60018a 2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2928 demph_reg_value = 0x2B404848;
2929 uniqtranscale_reg_value = 0x5580B83A;
2930 break;
bd60018a 2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2932 demph_reg_value = 0x2B404040;
2933 uniqtranscale_reg_value = 0x55ADDA3A;
2934 break;
2935 default:
2936 return 0;
2937 }
2938 break;
bd60018a 2939 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2940 preemph_reg_value = 0x0000000;
2941 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2942 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2943 demph_reg_value = 0x2B305555;
2944 uniqtranscale_reg_value = 0x5570B83A;
2945 break;
bd60018a 2946 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2947 demph_reg_value = 0x2B2B4040;
2948 uniqtranscale_reg_value = 0x55ADDA3A;
2949 break;
2950 default:
2951 return 0;
2952 }
2953 break;
bd60018a 2954 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2955 preemph_reg_value = 0x0006000;
2956 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2958 demph_reg_value = 0x1B405555;
2959 uniqtranscale_reg_value = 0x55ADDA3A;
2960 break;
2961 default:
2962 return 0;
2963 }
2964 break;
2965 default:
2966 return 0;
2967 }
2968
0980a60f 2969 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2970 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2971 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2972 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2973 uniqtranscale_reg_value);
ab3c759a
CML
2974 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2975 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2976 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 2978 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
2979
2980 return 0;
2981}
2982
e4a1d846
CML
2983static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2984{
2985 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2986 struct drm_i915_private *dev_priv = dev->dev_private;
2987 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2988 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 2989 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
2990 uint8_t train_set = intel_dp->train_set[0];
2991 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
2992 enum pipe pipe = intel_crtc->pipe;
2993 int i;
e4a1d846
CML
2994
2995 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2996 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 2997 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
2999 deemph_reg_value = 128;
3000 margin_reg_value = 52;
3001 break;
bd60018a 3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3003 deemph_reg_value = 128;
3004 margin_reg_value = 77;
3005 break;
bd60018a 3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3007 deemph_reg_value = 128;
3008 margin_reg_value = 102;
3009 break;
bd60018a 3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3011 deemph_reg_value = 128;
3012 margin_reg_value = 154;
3013 /* FIXME extra to set for 1200 */
3014 break;
3015 default:
3016 return 0;
3017 }
3018 break;
bd60018a 3019 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3022 deemph_reg_value = 85;
3023 margin_reg_value = 78;
3024 break;
bd60018a 3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3026 deemph_reg_value = 85;
3027 margin_reg_value = 116;
3028 break;
bd60018a 3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3030 deemph_reg_value = 85;
3031 margin_reg_value = 154;
3032 break;
3033 default:
3034 return 0;
3035 }
3036 break;
bd60018a 3037 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3038 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3040 deemph_reg_value = 64;
3041 margin_reg_value = 104;
3042 break;
bd60018a 3043 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3044 deemph_reg_value = 64;
3045 margin_reg_value = 154;
3046 break;
3047 default:
3048 return 0;
3049 }
3050 break;
bd60018a 3051 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3052 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3053 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3054 deemph_reg_value = 43;
3055 margin_reg_value = 154;
3056 break;
3057 default:
3058 return 0;
3059 }
3060 break;
3061 default:
3062 return 0;
3063 }
3064
3065 mutex_lock(&dev_priv->dpio_lock);
3066
3067 /* Clear calc init */
1966e59e
VS
3068 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3069 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3070 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3071 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3072 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3073
3074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3075 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3076 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3077 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3078 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3079
a02ef3c7
VS
3080 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3081 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3082 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3083 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3084
3085 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3086 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3087 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3088 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3089
e4a1d846 3090 /* Program swing deemph */
f72df8db
VS
3091 for (i = 0; i < 4; i++) {
3092 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3093 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3094 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3095 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3096 }
e4a1d846
CML
3097
3098 /* Program swing margin */
f72df8db
VS
3099 for (i = 0; i < 4; i++) {
3100 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3101 val &= ~DPIO_SWING_MARGIN000_MASK;
3102 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3103 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3104 }
e4a1d846
CML
3105
3106 /* Disable unique transition scale */
f72df8db
VS
3107 for (i = 0; i < 4; i++) {
3108 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3109 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3110 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3111 }
e4a1d846
CML
3112
3113 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3114 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3115 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3116 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3117
3118 /*
3119 * The document said it needs to set bit 27 for ch0 and bit 26
3120 * for ch1. Might be a typo in the doc.
3121 * For now, for this unique transition scale selection, set bit
3122 * 27 for ch0 and ch1.
3123 */
f72df8db
VS
3124 for (i = 0; i < 4; i++) {
3125 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3126 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3127 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3128 }
e4a1d846 3129
f72df8db
VS
3130 for (i = 0; i < 4; i++) {
3131 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3132 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3133 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3134 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3135 }
e4a1d846
CML
3136 }
3137
3138 /* Start swing calculation */
1966e59e
VS
3139 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3140 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3141 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3142
3143 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3144 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3145 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3146
3147 /* LRC Bypass */
3148 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3149 val |= DPIO_LRC_BYPASS;
3150 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3151
3152 mutex_unlock(&dev_priv->dpio_lock);
3153
3154 return 0;
3155}
3156
a4fc5ed6 3157static void
0301b3ac
JN
3158intel_get_adjust_train(struct intel_dp *intel_dp,
3159 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3160{
3161 uint8_t v = 0;
3162 uint8_t p = 0;
3163 int lane;
1a2eb460
KP
3164 uint8_t voltage_max;
3165 uint8_t preemph_max;
a4fc5ed6 3166
33a34e4e 3167 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3168 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3169 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3170
3171 if (this_v > v)
3172 v = this_v;
3173 if (this_p > p)
3174 p = this_p;
3175 }
3176
1a2eb460 3177 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3178 if (v >= voltage_max)
3179 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3180
1a2eb460
KP
3181 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3182 if (p >= preemph_max)
3183 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3184
3185 for (lane = 0; lane < 4; lane++)
33a34e4e 3186 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3187}
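/*
 * intel_get_adjust_train() above picks the highest swing and pre-emphasis
 * requested by any lane, clamps them to the source limits and then applies
 * the same (clamped) value to all four train_set entries, since all lanes
 * are driven with identical settings.
 */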
3188
3189static uint32_t
f0a3424e 3190intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3191{
3cf2efb1 3192 uint32_t signal_levels = 0;
a4fc5ed6 3193
3cf2efb1 3194 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3195 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3196 default:
3197 signal_levels |= DP_VOLTAGE_0_4;
3198 break;
bd60018a 3199 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3200 signal_levels |= DP_VOLTAGE_0_6;
3201 break;
bd60018a 3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3203 signal_levels |= DP_VOLTAGE_0_8;
3204 break;
bd60018a 3205 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3206 signal_levels |= DP_VOLTAGE_1_2;
3207 break;
3208 }
3cf2efb1 3209 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3210 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3211 default:
3212 signal_levels |= DP_PRE_EMPHASIS_0;
3213 break;
bd60018a 3214 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3215 signal_levels |= DP_PRE_EMPHASIS_3_5;
3216 break;
bd60018a 3217 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3218 signal_levels |= DP_PRE_EMPHASIS_6;
3219 break;
bd60018a 3220 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3221 signal_levels |= DP_PRE_EMPHASIS_9_5;
3222 break;
3223 }
3224 return signal_levels;
3225}
3226
e3421a18
ZW
3227/* Gen6's DP voltage swing and pre-emphasis control */
3228static uint32_t
3229intel_gen6_edp_signal_levels(uint8_t train_set)
3230{
3c5a62b5
YL
3231 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3232 DP_TRAIN_PRE_EMPHASIS_MASK);
3233 switch (signal_levels) {
bd60018a
SJ
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3235 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3236 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3238 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3241 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3244 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3247 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3248 default:
3c5a62b5
YL
3249 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3250 "0x%x\n", signal_levels);
3251 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3252 }
3253}
3254
1a2eb460
KP
3255/* Gen7's DP voltage swing and pre-emphasis control */
3256static uint32_t
3257intel_gen7_edp_signal_levels(uint8_t train_set)
3258{
3259 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3260 DP_TRAIN_PRE_EMPHASIS_MASK);
3261 switch (signal_levels) {
bd60018a 3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3263 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3265 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3267 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3268
bd60018a 3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3270 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3272 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3273
bd60018a 3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3275 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3277 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3278
3279 default:
3280 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3281 "0x%x\n", signal_levels);
3282 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3283 }
3284}
3285
d6c0d722
PZ
3286/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3287static uint32_t
f0a3424e 3288intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3289{
d6c0d722
PZ
3290 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3291 DP_TRAIN_PRE_EMPHASIS_MASK);
3292 switch (signal_levels) {
bd60018a 3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3294 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3295 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3296 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3298 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3300 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3301
bd60018a 3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3303 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3305 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3307 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3308
bd60018a 3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3310 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3312 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3313
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3315 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3316 default:
3317 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3318 "0x%x\n", signal_levels);
c5fe6a06 3319 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3320 }
a4fc5ed6
KP
3321}
3322
f0a3424e
PZ
3323/* Properly updates "DP" with the correct signal levels. */
3324static void
3325intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3326{
3327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3328 enum port port = intel_dig_port->port;
f0a3424e
PZ
3329 struct drm_device *dev = intel_dig_port->base.base.dev;
3330 uint32_t signal_levels, mask;
3331 uint8_t train_set = intel_dp->train_set[0];
3332
5a9d1f1a 3333 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3334 signal_levels = intel_hsw_signal_levels(train_set);
3335 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3336 } else if (IS_CHERRYVIEW(dev)) {
3337 signal_levels = intel_chv_signal_levels(intel_dp);
3338 mask = 0;
e2fa6fba
P
3339 } else if (IS_VALLEYVIEW(dev)) {
3340 signal_levels = intel_vlv_signal_levels(intel_dp);
3341 mask = 0;
bc7d38a4 3342 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3343 signal_levels = intel_gen7_edp_signal_levels(train_set);
3344 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3345 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3346 signal_levels = intel_gen6_edp_signal_levels(train_set);
3347 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3348 } else {
3349 signal_levels = intel_gen4_signal_levels(train_set);
3350 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3351 }
3352
3353 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3354
3355 *DP = (*DP & ~mask) | signal_levels;
3356}
3357
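/*
 * Note on the dispatch above: each platform family translates the DPCD
 * train_set byte into its own register encoding, and "mask" tells the
 * caller which bits of the port register those levels occupy. On VLV/CHV
 * the mask is 0 because swing and pre-emphasis are programmed through the
 * DPIO PHY registers rather than the DP port register, so *DP is left
 * untouched there.
 */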
a4fc5ed6 3358static bool
ea5b213a 3359intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3360 uint32_t *DP,
58e10eb9 3361 uint8_t dp_train_pat)
a4fc5ed6 3362{
174edf1f
PZ
3363 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3364 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3365 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3366 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3367 int ret, len;
a4fc5ed6 3368
7b13b58a 3369 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3370
70aff66c 3371 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3372 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3373
2cdfe6c8
JN
3374 buf[0] = dp_train_pat;
3375 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3376 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3377 /* don't write DP_TRAINING_LANEx_SET on disable */
3378 len = 1;
3379 } else {
3380 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3381 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3382 len = intel_dp->lane_count + 1;
47ea7542 3383 }
a4fc5ed6 3384
9d1a1031
JN
3385 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3386 buf, len);
2cdfe6c8
JN
3387
3388 return ret == len;
a4fc5ed6
KP
3389}
3390
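/*
 * Layout of the DPCD burst written by intel_dp_set_link_train() above,
 * shown here for illustration with lane_count == 2 and training pattern 1
 * (register addresses per the DisplayPort spec):
 *
 *   buf[0] -> DP_TRAINING_PATTERN_SET (0x102) = TPS1
 *   buf[1] -> DP_TRAINING_LANE0_SET   (0x103) = train_set[0]
 *   buf[2] -> DP_TRAINING_LANE1_SET   (0x104) = train_set[1]
 *
 * The lane registers directly follow DP_TRAINING_PATTERN_SET, which is why
 * a single drm_dp_dpcd_write() of len bytes covers both; on
 * DP_TRAINING_PATTERN_DISABLE only the pattern byte is written.
 */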
70aff66c
JN
3391static bool
3392intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3393 uint8_t dp_train_pat)
3394{
953d22e8 3395 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3396 intel_dp_set_signal_levels(intel_dp, DP);
3397 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3398}
3399
3400static bool
3401intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3402 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3403{
3404 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3405 struct drm_device *dev = intel_dig_port->base.base.dev;
3406 struct drm_i915_private *dev_priv = dev->dev_private;
3407 int ret;
3408
3409 intel_get_adjust_train(intel_dp, link_status);
3410 intel_dp_set_signal_levels(intel_dp, DP);
3411
3412 I915_WRITE(intel_dp->output_reg, *DP);
3413 POSTING_READ(intel_dp->output_reg);
3414
9d1a1031
JN
3415 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3416 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3417
3418 return ret == intel_dp->lane_count;
3419}
3420
3ab9c637
ID
3421static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3422{
3423 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3424 struct drm_device *dev = intel_dig_port->base.base.dev;
3425 struct drm_i915_private *dev_priv = dev->dev_private;
3426 enum port port = intel_dig_port->port;
3427 uint32_t val;
3428
3429 if (!HAS_DDI(dev))
3430 return;
3431
3432 val = I915_READ(DP_TP_CTL(port));
3433 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3434 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3435 I915_WRITE(DP_TP_CTL(port), val);
3436
3437 /*
3438 * On PORT_A we can have only eDP in SST mode. There, the only reason
3439 * we need to set idle transmission mode is to work around a HW issue
3440 * where we enable the pipe while not in idle link-training mode.
3441 * In this case there is a requirement to wait for a minimum number of
3442 * idle patterns to be sent.
3443 */
3444 if (port == PORT_A)
3445 return;
3446
3447 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3448 1))
3449 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3450}
3451
33a34e4e 3452/* Enable corresponding port and start training pattern 1 */
c19b0669 3453void
33a34e4e 3454intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3455{
da63a9f2 3456 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3457 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3458 int i;
3459 uint8_t voltage;
cdb0e95b 3460 int voltage_tries, loop_tries;
ea5b213a 3461 uint32_t DP = intel_dp->DP;
6aba5b6c 3462 uint8_t link_config[2];
a4fc5ed6 3463
affa9354 3464 if (HAS_DDI(dev))
c19b0669
PZ
3465 intel_ddi_prepare_link_retrain(encoder);
3466
3cf2efb1 3467 /* Write the link configuration data */
6aba5b6c
JN
3468 link_config[0] = intel_dp->link_bw;
3469 link_config[1] = intel_dp->lane_count;
3470 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3471 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3472 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
a8f3ef61
SJ
3473 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
3474 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3475 &intel_dp->rate_select, 1);
6aba5b6c
JN
3476
3477 link_config[0] = 0;
3478 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3479 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3480
3481 DP |= DP_PORT_EN;
1a2eb460 3482
70aff66c
JN
3483 /* clock recovery */
3484 if (!intel_dp_reset_link_train(intel_dp, &DP,
3485 DP_TRAINING_PATTERN_1 |
3486 DP_LINK_SCRAMBLING_DISABLE)) {
3487 DRM_ERROR("failed to enable link training\n");
3488 return;
3489 }
3490
a4fc5ed6 3491 voltage = 0xff;
cdb0e95b
KP
3492 voltage_tries = 0;
3493 loop_tries = 0;
a4fc5ed6 3494 for (;;) {
70aff66c 3495 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3496
a7c9655f 3497 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3498 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3499 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3500 break;
93f62dad 3501 }
a4fc5ed6 3502
01916270 3503 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3504 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3505 break;
3506 }
3507
3508 /* Check to see if we've tried the max voltage */
3509 for (i = 0; i < intel_dp->lane_count; i++)
3510 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3511 break;
3b4f819d 3512 if (i == intel_dp->lane_count) {
b06fbda3
DV
3513 ++loop_tries;
3514 if (loop_tries == 5) {
3def84b3 3515 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3516 break;
3517 }
70aff66c
JN
3518 intel_dp_reset_link_train(intel_dp, &DP,
3519 DP_TRAINING_PATTERN_1 |
3520 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3521 voltage_tries = 0;
3522 continue;
3523 }
a4fc5ed6 3524
3cf2efb1 3525 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3526 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3527 ++voltage_tries;
b06fbda3 3528 if (voltage_tries == 5) {
3def84b3 3529 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3530 break;
3531 }
3532 } else
3533 voltage_tries = 0;
3534 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3535
70aff66c
JN
3536 /* Update training set as requested by target */
3537 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3538 DRM_ERROR("failed to update link training\n");
3539 break;
3540 }
a4fc5ed6
KP
3541 }
3542
33a34e4e
JB
3543 intel_dp->DP = DP;
3544}
3545
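/*
 * Clock recovery loop above, in short: after each read of the link status
 * the sink's adjust requests are applied and the pattern is retried. If
 * every lane reports DP_TRAIN_MAX_SWING_REACHED the whole pattern-1
 * sequence is restarted from the reset levels, at most 5 times
 * (loop_tries); if the sink keeps requesting the same voltage swing 5
 * times in a row (voltage_tries) the loop gives up, since further retries
 * at that level cannot make progress.
 */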
c19b0669 3546void
33a34e4e
JB
3547intel_dp_complete_link_train(struct intel_dp *intel_dp)
3548{
33a34e4e 3549 bool channel_eq = false;
37f80975 3550 int tries, cr_tries;
33a34e4e 3551 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3552 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3553
3554 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3555 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3556 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3557
a4fc5ed6 3558 /* channel equalization */
70aff66c 3559 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3560 training_pattern |
70aff66c
JN
3561 DP_LINK_SCRAMBLING_DISABLE)) {
3562 DRM_ERROR("failed to start channel equalization\n");
3563 return;
3564 }
3565
a4fc5ed6 3566 tries = 0;
37f80975 3567 cr_tries = 0;
a4fc5ed6
KP
3568 channel_eq = false;
3569 for (;;) {
70aff66c 3570 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3571
37f80975
JB
3572 if (cr_tries > 5) {
3573 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3574 break;
3575 }
3576
a7c9655f 3577 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3578 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3579 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3580 break;
70aff66c 3581 }
a4fc5ed6 3582
37f80975 3583 /* Make sure clock is still ok */
01916270 3584 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3585 intel_dp_start_link_train(intel_dp);
70aff66c 3586 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3587 training_pattern |
70aff66c 3588 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3589 cr_tries++;
3590 continue;
3591 }
3592
1ffdff13 3593 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3594 channel_eq = true;
3595 break;
3596 }
a4fc5ed6 3597
37f80975
JB
3598 /* Try 5 times, then try clock recovery if that fails */
3599 if (tries > 5) {
37f80975 3600 intel_dp_start_link_train(intel_dp);
70aff66c 3601 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3602 training_pattern |
70aff66c 3603 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3604 tries = 0;
3605 cr_tries++;
3606 continue;
3607 }
a4fc5ed6 3608
70aff66c
JN
3609 /* Update training set as requested by target */
3610 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3611 DRM_ERROR("failed to update link training\n");
3612 break;
3613 }
3cf2efb1 3614 ++tries;
869184a6 3615 }
3cf2efb1 3616
3ab9c637
ID
3617 intel_dp_set_idle_link_train(intel_dp);
3618
3619 intel_dp->DP = DP;
3620
d6c0d722 3621 if (channel_eq)
07f42258 3622 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3623
3ab9c637
ID
3624}
3625
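/*
 * Channel equalization above: TPS2 is used by default, TPS3 when the link
 * runs at 5.4 GHz or both ends advertise TPS3 support. If clock recovery
 * is lost mid-way, pattern-1 training is redone and cr_tries is bumped;
 * more than 5 such restarts aborts the sequence. If equalization has not
 * converged after 5 adjust updates, training likewise drops back to clock
 * recovery before trying again.
 */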
3626void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3627{
70aff66c 3628 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3629 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3630}
3631
3632static void
ea5b213a 3633intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3634{
da63a9f2 3635 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3636 enum port port = intel_dig_port->port;
da63a9f2 3637 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3638 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3639 uint32_t DP = intel_dp->DP;
a4fc5ed6 3640
bc76e320 3641 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3642 return;
3643
0c33d8d7 3644 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3645 return;
3646
28c97730 3647 DRM_DEBUG_KMS("\n");
32f9d658 3648
bc7d38a4 3649 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3650 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3651 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3652 } else {
aad3d14d
VS
3653 if (IS_CHERRYVIEW(dev))
3654 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3655 else
3656 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3657 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3658 }
fe255d00 3659 POSTING_READ(intel_dp->output_reg);
5eb08b69 3660
493a7081 3661 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3662 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3663 /* Hardware workaround: leaving our transcoder select
3664 * set to transcoder B while it's off will prevent the
3665 * corresponding HDMI output on transcoder A.
3666 *
3667 * Combine this with another hardware workaround:
3668 * transcoder select bit can only be cleared while the
3669 * port is enabled.
3670 */
3671 DP &= ~DP_PIPEB_SELECT;
3672 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3673 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3674 }
3675
832afda6 3676 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3677 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3678 POSTING_READ(intel_dp->output_reg);
f01eca2e 3679 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3680}
3681
26d61aad
KP
3682static bool
3683intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3684{
a031d709
RV
3685 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3686 struct drm_device *dev = dig_port->base.base.dev;
3687 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3688 uint8_t rev;
a031d709 3689
9d1a1031
JN
3690 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3691 sizeof(intel_dp->dpcd)) < 0)
edb39244 3692 return false; /* aux transfer failed */
92fd8fd1 3693
a8e98153 3694 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3695
edb39244
AJ
3696 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3697 return false; /* DPCD not present */
3698
2293bb5c
SK
3699 /* Check if the panel supports PSR */
3700 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3701 if (is_edp(intel_dp)) {
9d1a1031
JN
3702 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3703 intel_dp->psr_dpcd,
3704 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3705 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3706 dev_priv->psr.sink_support = true;
50003939 3707 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3708 }
50003939
JN
3709 }
3710
7809a611 3711 /* Training Pattern 3 support, both source and sink */
06ea66b6 3712 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3713 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3714 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3715 intel_dp->use_tps3 = true;
f8d8a672 3716 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3717 } else
3718 intel_dp->use_tps3 = false;
3719
fc0f8e25
SJ
3720 /* Intermediate frequency support */
3721 if (is_edp(intel_dp) &&
3722 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3723 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3724 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3725 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3726 int i;
3727
fc0f8e25
SJ
3728 intel_dp_dpcd_read_wake(&intel_dp->aux,
3729 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3730 supported_rates,
3731 sizeof(supported_rates));
3732
3733 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3734 int val = le16_to_cpu(supported_rates[i]);
3735
3736 if (val == 0)
3737 break;
3738
3739 intel_dp->supported_rates[i] = val * 200;
3740 }
3741 intel_dp->num_supported_rates = i;
fc0f8e25 3742 }
edb39244
AJ
3743 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3744 DP_DWN_STRM_PORT_PRESENT))
3745 return true; /* native DP sink */
3746
3747 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3748 return true; /* no per-port downstream info */
3749
9d1a1031
JN
3750 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3751 intel_dp->downstream_ports,
3752 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3753 return false; /* downstream port status fetch failed */
3754
3755 return true;
92fd8fd1
KP
3756}
3757
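/*
 * The eDP 1.4 link rate table parsed above stores each 16-bit entry in
 * units of 200 kHz; the driver converts it to kHz by multiplying by 200.
 * A minimal sketch of that conversion, assuming the same semantics as the
 * loop in intel_dp_get_dpcd() (illustrative only, not used anywhere):
 */
#if 0
static int example_dpcd_rate_to_khz(__le16 raw)
{
	/* e.g. a raw value of 810 (0x032a) yields 162000 kHz, i.e. RBR */
	return le16_to_cpu(raw) * 200;
}
#endif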
0d198328
AJ
3758static void
3759intel_dp_probe_oui(struct intel_dp *intel_dp)
3760{
3761 u8 buf[3];
3762
3763 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3764 return;
3765
9d1a1031 3766 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3767 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3768 buf[0], buf[1], buf[2]);
3769
9d1a1031 3770 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3771 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3772 buf[0], buf[1], buf[2]);
3773}
3774
0e32b39c
DA
3775static bool
3776intel_dp_probe_mst(struct intel_dp *intel_dp)
3777{
3778 u8 buf[1];
3779
3780 if (!intel_dp->can_mst)
3781 return false;
3782
3783 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3784 return false;
3785
0e32b39c
DA
3786 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3787 if (buf[0] & DP_MST_CAP) {
3788 DRM_DEBUG_KMS("Sink is MST capable\n");
3789 intel_dp->is_mst = true;
3790 } else {
3791 DRM_DEBUG_KMS("Sink is not MST capable\n");
3792 intel_dp->is_mst = false;
3793 }
3794 }
0e32b39c
DA
3795
3796 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3797 return intel_dp->is_mst;
3798}
3799
d2e216d0
RV
3800int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3801{
3802 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3803 struct drm_device *dev = intel_dig_port->base.base.dev;
3804 struct intel_crtc *intel_crtc =
3805 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3806 u8 buf;
3807 int test_crc_count;
3808 int attempts = 6;
d2e216d0 3809
ad9dc91b 3810 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3811 return -EIO;
d2e216d0 3812
ad9dc91b 3813 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3814 return -ENOTTY;
3815
1dda5f93
RV
3816 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3817 return -EIO;
3818
9d1a1031 3819 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3820 buf | DP_TEST_SINK_START) < 0)
bda0381e 3821 return -EIO;
d2e216d0 3822
1dda5f93 3823 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3824 return -EIO;
ad9dc91b 3825 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3826
ad9dc91b 3827 do {
1dda5f93
RV
3828 if (drm_dp_dpcd_readb(&intel_dp->aux,
3829 DP_TEST_SINK_MISC, &buf) < 0)
3830 return -EIO;
ad9dc91b
RV
3831 intel_wait_for_vblank(dev, intel_crtc->pipe);
3832 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3833
3834 if (attempts == 0) {
90bd1f46
DV
3835 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3836 return -ETIMEDOUT;
ad9dc91b 3837 }
d2e216d0 3838
9d1a1031 3839 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3840 return -EIO;
d2e216d0 3841
1dda5f93
RV
3842 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3843 return -EIO;
3844 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3845 buf & ~DP_TEST_SINK_START) < 0)
3846 return -EIO;
ce31d9f4 3847
d2e216d0
RV
3848 return 0;
3849}
3850
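/*
 * Sink CRC capture above, step by step: check DP_TEST_SINK_MISC for CRC
 * support, set DP_TEST_SINK_START, poll the test count for up to 6 vblanks
 * until the sink reports a new CRC, read the 6 CRC bytes starting at
 * DP_TEST_CRC_R_CR, and finally clear DP_TEST_SINK_START again. Any AUX
 * failure along the way is reported as -EIO.
 */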
a60f0e38
JB
3851static bool
3852intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3853{
9d1a1031
JN
3854 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3855 DP_DEVICE_SERVICE_IRQ_VECTOR,
3856 sink_irq_vector, 1) == 1;
a60f0e38
JB
3857}
3858
0e32b39c
DA
3859static bool
3860intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3861{
3862 int ret;
3863
3864 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3865 DP_SINK_COUNT_ESI,
3866 sink_irq_vector, 14);
3867 if (ret != 14)
3868 return false;
3869
3870 return true;
3871}
3872
a60f0e38
JB
3873static void
3874intel_dp_handle_test_request(struct intel_dp *intel_dp)
3875{
3876 /* NAK by default */
9d1a1031 3877 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3878}
3879
0e32b39c
DA
3880static int
3881intel_dp_check_mst_status(struct intel_dp *intel_dp)
3882{
3883 bool bret;
3884
3885 if (intel_dp->is_mst) {
3886 u8 esi[16] = { 0 };
3887 int ret = 0;
3888 int retry;
3889 bool handled;
3890 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3891go_again:
3892 if (bret == true) {
3893
3894 /* check link status - esi[10] = 0x200c */
3895 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3896 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3897 intel_dp_start_link_train(intel_dp);
3898 intel_dp_complete_link_train(intel_dp);
3899 intel_dp_stop_link_train(intel_dp);
3900 }
3901
6f34cc39 3902 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3903 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3904
3905 if (handled) {
3906 for (retry = 0; retry < 3; retry++) {
3907 int wret;
3908 wret = drm_dp_dpcd_write(&intel_dp->aux,
3909 DP_SINK_COUNT_ESI+1,
3910 &esi[1], 3);
3911 if (wret == 3) {
3912 break;
3913 }
3914 }
3915
3916 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3917 if (bret == true) {
6f34cc39 3918 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3919 goto go_again;
3920 }
3921 } else
3922 ret = 0;
3923
3924 return ret;
3925 } else {
3926 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3927 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3928 intel_dp->is_mst = false;
3929 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3930 /* send a hotplug event */
3931 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3932 }
3933 }
3934 return -EINVAL;
3935}
3936
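/*
 * MST interrupt handling above: the ESI block (DP_SINK_COUNT_ESI, 14
 * bytes) is read in one go; if active links have lost channel EQ they are
 * retrained first, then the ESI is handed to the MST topology manager.
 * Handled events are acked by writing esi[1..3] back (retried up to 3
 * times) and the loop repeats while new IRQs keep arriving; if the ESI
 * read fails the sink is assumed gone and MST mode is torn down.
 */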
a4fc5ed6
KP
3937/*
3938 * According to DP spec
3939 * 5.1.2:
3940 * 1. Read DPCD
3941 * 2. Configure link according to Receiver Capabilities
3942 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3943 * 4. Check link status on receipt of hot-plug interrupt
3944 */
a5146200 3945static void
ea5b213a 3946intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3947{
5b215bcf 3948 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3949 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3950 u8 sink_irq_vector;
93f62dad 3951 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3952
5b215bcf
DA
3953 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3954
da63a9f2 3955 if (!intel_encoder->connectors_active)
d2b996ac 3956 return;
59cd09e1 3957
da63a9f2 3958 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3959 return;
3960
1a125d8a
ID
3961 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3962 return;
3963
92fd8fd1 3964 /* Try to read receiver status if the link appears to be up */
93f62dad 3965 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3966 return;
3967 }
3968
92fd8fd1 3969 /* Now read the DPCD to see if it's actually running */
26d61aad 3970 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3971 return;
3972 }
3973
a60f0e38
JB
3974 /* Try to read the source of the interrupt */
3975 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3976 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3977 /* Clear interrupt source */
9d1a1031
JN
3978 drm_dp_dpcd_writeb(&intel_dp->aux,
3979 DP_DEVICE_SERVICE_IRQ_VECTOR,
3980 sink_irq_vector);
a60f0e38
JB
3981
3982 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3983 intel_dp_handle_test_request(intel_dp);
3984 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3985 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3986 }
3987
1ffdff13 3988 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 3989 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 3990 intel_encoder->base.name);
33a34e4e
JB
3991 intel_dp_start_link_train(intel_dp);
3992 intel_dp_complete_link_train(intel_dp);
3ab9c637 3993 intel_dp_stop_link_train(intel_dp);
33a34e4e 3994 }
a4fc5ed6 3995}
a4fc5ed6 3996
caf9ab24 3997/* XXX this is probably wrong for multiple downstream ports */
71ba9000 3998static enum drm_connector_status
26d61aad 3999intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4000{
caf9ab24 4001 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4002 uint8_t type;
4003
4004 if (!intel_dp_get_dpcd(intel_dp))
4005 return connector_status_disconnected;
4006
4007 /* if there's no downstream port, we're done */
4008 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4009 return connector_status_connected;
caf9ab24
AJ
4010
4011 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4012 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4013 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4014 uint8_t reg;
9d1a1031
JN
4015
4016 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4017 &reg, 1) < 0)
caf9ab24 4018 return connector_status_unknown;
9d1a1031 4019
23235177
AJ
4020 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4021 : connector_status_disconnected;
caf9ab24
AJ
4022 }
4023
4024 /* If no HPD, poke DDC gently */
0b99836f 4025 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4026 return connector_status_connected;
caf9ab24
AJ
4027
4028 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4029 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4030 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4031 if (type == DP_DS_PORT_TYPE_VGA ||
4032 type == DP_DS_PORT_TYPE_NON_EDID)
4033 return connector_status_unknown;
4034 } else {
4035 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4036 DP_DWN_STRM_PORT_TYPE_MASK;
4037 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4038 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4039 return connector_status_unknown;
4040 }
caf9ab24
AJ
4041
4042 /* Anything else is out of spec, warn and ignore */
4043 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4044 return connector_status_disconnected;
71ba9000
AJ
4045}
4046
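/*
 * Branch-device detection above boils down to: no downstream port means a
 * native sink and we report connected; an HPD-capable branch (DPCD 1.1+
 * with DP_DS_PORT_HPD) is trusted via its SINK_COUNT; otherwise a gentle
 * DDC probe decides, and for port types that cannot be probed reliably
 * (VGA, NON_EDID/OTHER) the status is left as unknown.
 */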
d410b56d
CW
4047static enum drm_connector_status
4048edp_detect(struct intel_dp *intel_dp)
4049{
4050 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4051 enum drm_connector_status status;
4052
4053 status = intel_panel_detect(dev);
4054 if (status == connector_status_unknown)
4055 status = connector_status_connected;
4056
4057 return status;
4058}
4059
5eb08b69 4060static enum drm_connector_status
a9756bb5 4061ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4062{
30add22d 4063 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4064 struct drm_i915_private *dev_priv = dev->dev_private;
4065 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4066
1b469639
DL
4067 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4068 return connector_status_disconnected;
4069
26d61aad 4070 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4071}
4072
2a592bec
DA
4073static int g4x_digital_port_connected(struct drm_device *dev,
4074 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4075{
a4fc5ed6 4076 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4077 uint32_t bit;
5eb08b69 4078
232a6ee9
TP
4079 if (IS_VALLEYVIEW(dev)) {
4080 switch (intel_dig_port->port) {
4081 case PORT_B:
4082 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4083 break;
4084 case PORT_C:
4085 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4086 break;
4087 case PORT_D:
4088 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4089 break;
4090 default:
2a592bec 4091 return -EINVAL;
232a6ee9
TP
4092 }
4093 } else {
4094 switch (intel_dig_port->port) {
4095 case PORT_B:
4096 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4097 break;
4098 case PORT_C:
4099 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4100 break;
4101 case PORT_D:
4102 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4103 break;
4104 default:
2a592bec 4105 return -EINVAL;
232a6ee9 4106 }
a4fc5ed6
KP
4107 }
4108
10f76a38 4109 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4110 return 0;
4111 return 1;
4112}
4113
4114static enum drm_connector_status
4115g4x_dp_detect(struct intel_dp *intel_dp)
4116{
4117 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4119 int ret;
4120
4121 /* Can't disconnect eDP, but you can close the lid... */
4122 if (is_edp(intel_dp)) {
4123 enum drm_connector_status status;
4124
4125 status = intel_panel_detect(dev);
4126 if (status == connector_status_unknown)
4127 status = connector_status_connected;
4128 return status;
4129 }
4130
4131 ret = g4x_digital_port_connected(dev, intel_dig_port);
4132 if (ret == -EINVAL)
4133 return connector_status_unknown;
4134 else if (ret == 0)
a4fc5ed6
KP
4135 return connector_status_disconnected;
4136
26d61aad 4137 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4138}
4139
8c241fef 4140static struct edid *
beb60608 4141intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4142{
beb60608 4143 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4144
9cd300e0
JN
4145 /* use cached edid if we have one */
4146 if (intel_connector->edid) {
9cd300e0
JN
4147 /* invalid edid */
4148 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4149 return NULL;
4150
55e9edeb 4151 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4152 } else
4153 return drm_get_edid(&intel_connector->base,
4154 &intel_dp->aux.ddc);
4155}
8c241fef 4156
beb60608
CW
4157static void
4158intel_dp_set_edid(struct intel_dp *intel_dp)
4159{
4160 struct intel_connector *intel_connector = intel_dp->attached_connector;
4161 struct edid *edid;
8c241fef 4162
beb60608
CW
4163 edid = intel_dp_get_edid(intel_dp);
4164 intel_connector->detect_edid = edid;
4165
4166 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4167 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4168 else
4169 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4170}
4171
beb60608
CW
4172static void
4173intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4174{
beb60608 4175 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4176
beb60608
CW
4177 kfree(intel_connector->detect_edid);
4178 intel_connector->detect_edid = NULL;
9cd300e0 4179
beb60608
CW
4180 intel_dp->has_audio = false;
4181}
d6f24d0f 4182
beb60608
CW
4183static enum intel_display_power_domain
4184intel_dp_power_get(struct intel_dp *dp)
4185{
4186 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4187 enum intel_display_power_domain power_domain;
4188
4189 power_domain = intel_display_port_power_domain(encoder);
4190 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4191
4192 return power_domain;
4193}
d6f24d0f 4194
beb60608
CW
4195static void
4196intel_dp_power_put(struct intel_dp *dp,
4197 enum intel_display_power_domain power_domain)
4198{
4199 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4200 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4201}
4202
a9756bb5
ZW
4203static enum drm_connector_status
4204intel_dp_detect(struct drm_connector *connector, bool force)
4205{
4206 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4207 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4208 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4209 struct drm_device *dev = connector->dev;
a9756bb5 4210 enum drm_connector_status status;
671dedd2 4211 enum intel_display_power_domain power_domain;
0e32b39c 4212 bool ret;
a9756bb5 4213
164c8598 4214 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4215 connector->base.id, connector->name);
beb60608 4216 intel_dp_unset_edid(intel_dp);
164c8598 4217
0e32b39c
DA
4218 if (intel_dp->is_mst) {
4219 /* MST devices are disconnected from a monitor POV */
4220 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4221 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4222 return connector_status_disconnected;
0e32b39c
DA
4223 }
4224
beb60608 4225 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4226
d410b56d
CW
4227 /* Can't disconnect eDP, but you can close the lid... */
4228 if (is_edp(intel_dp))
4229 status = edp_detect(intel_dp);
4230 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4231 status = ironlake_dp_detect(intel_dp);
4232 else
4233 status = g4x_dp_detect(intel_dp);
4234 if (status != connector_status_connected)
c8c8fb33 4235 goto out;
a9756bb5 4236
0d198328
AJ
4237 intel_dp_probe_oui(intel_dp);
4238
0e32b39c
DA
4239 ret = intel_dp_probe_mst(intel_dp);
4240 if (ret) {
4241 /* if we are in MST mode then this connector
4242 * won't appear connected or have anything with EDID on it */
4243 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4244 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4245 status = connector_status_disconnected;
4246 goto out;
4247 }
4248
beb60608 4249 intel_dp_set_edid(intel_dp);
a9756bb5 4250
d63885da
PZ
4251 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4252 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4253 status = connector_status_connected;
4254
4255out:
beb60608 4256 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4257 return status;
a4fc5ed6
KP
4258}
4259
beb60608
CW
4260static void
4261intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4262{
df0e9248 4263 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4264 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4265 enum intel_display_power_domain power_domain;
a4fc5ed6 4266
beb60608
CW
4267 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4268 connector->base.id, connector->name);
4269 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4270
beb60608
CW
4271 if (connector->status != connector_status_connected)
4272 return;
671dedd2 4273
beb60608
CW
4274 power_domain = intel_dp_power_get(intel_dp);
4275
4276 intel_dp_set_edid(intel_dp);
4277
4278 intel_dp_power_put(intel_dp, power_domain);
4279
4280 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4281 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4282}
4283
4284static int intel_dp_get_modes(struct drm_connector *connector)
4285{
4286 struct intel_connector *intel_connector = to_intel_connector(connector);
4287 struct edid *edid;
4288
4289 edid = intel_connector->detect_edid;
4290 if (edid) {
4291 int ret = intel_connector_update_modes(connector, edid);
4292 if (ret)
4293 return ret;
4294 }
32f9d658 4295
f8779fda 4296 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4297 if (is_edp(intel_attached_dp(connector)) &&
4298 intel_connector->panel.fixed_mode) {
f8779fda 4299 struct drm_display_mode *mode;
beb60608
CW
4300
4301 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4302 intel_connector->panel.fixed_mode);
f8779fda 4303 if (mode) {
32f9d658
ZW
4304 drm_mode_probed_add(connector, mode);
4305 return 1;
4306 }
4307 }
beb60608 4308
32f9d658 4309 return 0;
a4fc5ed6
KP
4310}
4311
1aad7ac0
CW
4312static bool
4313intel_dp_detect_audio(struct drm_connector *connector)
4314{
1aad7ac0 4315 bool has_audio = false;
beb60608 4316 struct edid *edid;
1aad7ac0 4317
beb60608
CW
4318 edid = to_intel_connector(connector)->detect_edid;
4319 if (edid)
1aad7ac0 4320 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4321
1aad7ac0
CW
4322 return has_audio;
4323}
4324
f684960e
CW
4325static int
4326intel_dp_set_property(struct drm_connector *connector,
4327 struct drm_property *property,
4328 uint64_t val)
4329{
e953fd7b 4330 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4331 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4332 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4333 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4334 int ret;
4335
662595df 4336 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4337 if (ret)
4338 return ret;
4339
3f43c48d 4340 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4341 int i = val;
4342 bool has_audio;
4343
4344 if (i == intel_dp->force_audio)
f684960e
CW
4345 return 0;
4346
1aad7ac0 4347 intel_dp->force_audio = i;
f684960e 4348
c3e5f67b 4349 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4350 has_audio = intel_dp_detect_audio(connector);
4351 else
c3e5f67b 4352 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4353
4354 if (has_audio == intel_dp->has_audio)
f684960e
CW
4355 return 0;
4356
1aad7ac0 4357 intel_dp->has_audio = has_audio;
f684960e
CW
4358 goto done;
4359 }
4360
e953fd7b 4361 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4362 bool old_auto = intel_dp->color_range_auto;
4363 uint32_t old_range = intel_dp->color_range;
4364
55bc60db
VS
4365 switch (val) {
4366 case INTEL_BROADCAST_RGB_AUTO:
4367 intel_dp->color_range_auto = true;
4368 break;
4369 case INTEL_BROADCAST_RGB_FULL:
4370 intel_dp->color_range_auto = false;
4371 intel_dp->color_range = 0;
4372 break;
4373 case INTEL_BROADCAST_RGB_LIMITED:
4374 intel_dp->color_range_auto = false;
4375 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4376 break;
4377 default:
4378 return -EINVAL;
4379 }
ae4edb80
DV
4380
4381 if (old_auto == intel_dp->color_range_auto &&
4382 old_range == intel_dp->color_range)
4383 return 0;
4384
e953fd7b
CW
4385 goto done;
4386 }
4387
53b41837
YN
4388 if (is_edp(intel_dp) &&
4389 property == connector->dev->mode_config.scaling_mode_property) {
4390 if (val == DRM_MODE_SCALE_NONE) {
4391 DRM_DEBUG_KMS("no scaling not supported\n");
4392 return -EINVAL;
4393 }
4394
4395 if (intel_connector->panel.fitting_mode == val) {
4396 /* the eDP scaling property is not changed */
4397 return 0;
4398 }
4399 intel_connector->panel.fitting_mode = val;
4400
4401 goto done;
4402 }
4403
f684960e
CW
4404 return -EINVAL;
4405
4406done:
c0c36b94
CW
4407 if (intel_encoder->base.crtc)
4408 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4409
4410 return 0;
4411}
4412
a4fc5ed6 4413static void
73845adf 4414intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4415{
1d508706 4416 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4417
10e972d3 4418 kfree(intel_connector->detect_edid);
beb60608 4419
9cd300e0
JN
4420 if (!IS_ERR_OR_NULL(intel_connector->edid))
4421 kfree(intel_connector->edid);
4422
acd8db10
PZ
4423 /* Can't call is_edp() since the encoder may have been destroyed
4424 * already. */
4425 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4426 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4427
a4fc5ed6 4428 drm_connector_cleanup(connector);
55f78c43 4429 kfree(connector);
a4fc5ed6
KP
4430}
4431
00c09d70 4432void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4433{
da63a9f2
PZ
4434 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4435 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4436
4f71d0cb 4437 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4438 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4439 if (is_edp(intel_dp)) {
4440 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4441 /*
4442 * vdd might still be enabled due to the delayed vdd off.
4443 * Make sure vdd is actually turned off here.
4444 */
773538e8 4445 pps_lock(intel_dp);
4be73780 4446 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4447 pps_unlock(intel_dp);
4448
01527b31
CT
4449 if (intel_dp->edp_notifier.notifier_call) {
4450 unregister_reboot_notifier(&intel_dp->edp_notifier);
4451 intel_dp->edp_notifier.notifier_call = NULL;
4452 }
bd943159 4453 }
c8bd0e49 4454 drm_encoder_cleanup(encoder);
da63a9f2 4455 kfree(intel_dig_port);
24d05927
DV
4456}
4457
07f9cd0b
ID
4458static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4459{
4460 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4461
4462 if (!is_edp(intel_dp))
4463 return;
4464
951468f3
VS
4465 /*
4466 * vdd might still be enabled due to the delayed vdd off.
4467 * Make sure vdd is actually turned off here.
4468 */
afa4e53a 4469 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4470 pps_lock(intel_dp);
07f9cd0b 4471 edp_panel_vdd_off_sync(intel_dp);
773538e8 4472 pps_unlock(intel_dp);
07f9cd0b
ID
4473}
4474
49e6bc51
VS
4475static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4476{
4477 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4478 struct drm_device *dev = intel_dig_port->base.base.dev;
4479 struct drm_i915_private *dev_priv = dev->dev_private;
4480 enum intel_display_power_domain power_domain;
4481
4482 lockdep_assert_held(&dev_priv->pps_mutex);
4483
4484 if (!edp_have_panel_vdd(intel_dp))
4485 return;
4486
4487 /*
4488 * The VDD bit needs a power domain reference, so if the bit is
4489 * already enabled when we boot or resume, grab this reference and
4490 * schedule a vdd off, so we don't hold on to the reference
4491 * indefinitely.
4492 */
4493 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4494 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4495 intel_display_power_get(dev_priv, power_domain);
4496
4497 edp_panel_vdd_schedule_off(intel_dp);
4498}
4499
6d93c0c4
ID
4500static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4501{
49e6bc51
VS
4502 struct intel_dp *intel_dp;
4503
4504 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4505 return;
4506
4507 intel_dp = enc_to_intel_dp(encoder);
4508
4509 pps_lock(intel_dp);
4510
4511 /*
4512 * Read out the current power sequencer assignment,
4513 * in case the BIOS did something with it.
4514 */
4515 if (IS_VALLEYVIEW(encoder->dev))
4516 vlv_initial_power_sequencer_setup(intel_dp);
4517
4518 intel_edp_panel_vdd_sanitize(intel_dp);
4519
4520 pps_unlock(intel_dp);
6d93c0c4
ID
4521}
4522
a4fc5ed6 4523static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4524 .dpms = intel_connector_dpms,
a4fc5ed6 4525 .detect = intel_dp_detect,
beb60608 4526 .force = intel_dp_force,
a4fc5ed6 4527 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4528 .set_property = intel_dp_set_property,
2545e4a6 4529 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4530 .destroy = intel_dp_connector_destroy,
c6f95f27 4531 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4532};
4533
4534static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4535 .get_modes = intel_dp_get_modes,
4536 .mode_valid = intel_dp_mode_valid,
df0e9248 4537 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4538};
4539
a4fc5ed6 4540static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4541 .reset = intel_dp_encoder_reset,
24d05927 4542 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4543};
4544
0e32b39c 4545void
21d40d37 4546intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4547{
0e32b39c 4548 return;
c8110e52 4549}
6207937d 4550
b2c5c181 4551enum irqreturn
13cf5504
DA
4552intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4553{
4554 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4555 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4556 struct drm_device *dev = intel_dig_port->base.base.dev;
4557 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4558 enum intel_display_power_domain power_domain;
b2c5c181 4559 enum irqreturn ret = IRQ_NONE;
1c767b33 4560
0e32b39c
DA
4561 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4562 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4563
7a7f84cc
VS
4564 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4565 /*
4566 * vdd off can generate a long pulse on eDP which
4567 * would require vdd on to handle it, and thus we
4568 * would end up in an endless cycle of
4569 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4570 */
4571 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4572 port_name(intel_dig_port->port));
a8b3d52f 4573 return IRQ_HANDLED;
7a7f84cc
VS
4574 }
4575
26fbb774
VS
4576 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4577 port_name(intel_dig_port->port),
0e32b39c 4578 long_hpd ? "long" : "short");
13cf5504 4579
1c767b33
ID
4580 power_domain = intel_display_port_power_domain(intel_encoder);
4581 intel_display_power_get(dev_priv, power_domain);
4582
0e32b39c 4583 if (long_hpd) {
2a592bec
DA
4584
4585 if (HAS_PCH_SPLIT(dev)) {
4586 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4587 goto mst_fail;
4588 } else {
4589 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4590 goto mst_fail;
4591 }
0e32b39c
DA
4592
4593 if (!intel_dp_get_dpcd(intel_dp)) {
4594 goto mst_fail;
4595 }
4596
4597 intel_dp_probe_oui(intel_dp);
4598
4599 if (!intel_dp_probe_mst(intel_dp))
4600 goto mst_fail;
4601
4602 } else {
4603 if (intel_dp->is_mst) {
1c767b33 4604 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4605 goto mst_fail;
4606 }
4607
4608 if (!intel_dp->is_mst) {
4609 /*
4610 * we'll check the link status via the normal hot plug path later -
4611 * but for short hpds we should check it now
4612 */
5b215bcf 4613 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4614 intel_dp_check_link_status(intel_dp);
5b215bcf 4615 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4616 }
4617 }
b2c5c181
DV
4618
4619 ret = IRQ_HANDLED;
4620
1c767b33 4621 goto put_power;
0e32b39c
DA
4622mst_fail:
4623 /* if we were in MST mode and the device is no longer there, get out of MST mode */
4624 if (intel_dp->is_mst) {
4625 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4626 intel_dp->is_mst = false;
4627 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4628 }
1c767b33
ID
4629put_power:
4630 intel_display_power_put(dev_priv, power_domain);
4631
4632 return ret;
13cf5504
DA
4633}
4634
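/*
 * HPD pulse handling above: long pulses on eDP are ignored outright to
 * avoid the vdd-off -> long-hpd -> vdd-on detection loop described in the
 * comment. Other long pulses re-check the live-status bits, re-read the
 * DPCD and re-probe MST; short pulses either service MST IRQs or, for SST,
 * check the link status under connection_mutex. Failures on the long path
 * jump to mst_fail, which tears down MST mode if it was active.
 */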
e3421a18
ZW
4635/* Return which DP Port should be selected for Transcoder DP control */
4636int
0206e353 4637intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4638{
4639 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4640 struct intel_encoder *intel_encoder;
4641 struct intel_dp *intel_dp;
e3421a18 4642
fa90ecef
PZ
4643 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4644 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4645
fa90ecef
PZ
4646 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4647 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4648 return intel_dp->output_reg;
e3421a18 4649 }
ea5b213a 4650
e3421a18
ZW
4651 return -1;
4652}
4653
36e83a18 4654/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4655bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4656{
4657 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4658 union child_device_config *p_child;
36e83a18 4659 int i;
5d8a7752
VS
4660 static const short port_mapping[] = {
4661 [PORT_B] = PORT_IDPB,
4662 [PORT_C] = PORT_IDPC,
4663 [PORT_D] = PORT_IDPD,
4664 };
36e83a18 4665
3b32a35b
VS
4666 if (port == PORT_A)
4667 return true;
4668
41aa3448 4669 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4670 return false;
4671
41aa3448
RV
4672 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4673 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4674
5d8a7752 4675 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4676 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4677 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4678 return true;
4679 }
4680 return false;
4681}
4682
0e32b39c 4683void
f684960e
CW
4684intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4685{
53b41837
YN
4686 struct intel_connector *intel_connector = to_intel_connector(connector);
4687
3f43c48d 4688 intel_attach_force_audio_property(connector);
e953fd7b 4689 intel_attach_broadcast_rgb_property(connector);
55bc60db 4690 intel_dp->color_range_auto = true;
53b41837
YN
4691
4692 if (is_edp(intel_dp)) {
4693 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4694 drm_object_attach_property(
4695 &connector->base,
53b41837 4696 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4697 DRM_MODE_SCALE_ASPECT);
4698 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4699 }
f684960e
CW
4700}
4701
dada1a9f
ID
4702static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4703{
4704 intel_dp->last_power_cycle = jiffies;
4705 intel_dp->last_power_on = jiffies;
4706 intel_dp->last_backlight_off = jiffies;
4707}
4708
67a54566
DV
4709static void
4710intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4711 struct intel_dp *intel_dp)
67a54566
DV
4712{
4713 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4714 struct edp_power_seq cur, vbt, spec,
4715 *final = &intel_dp->pps_delays;
67a54566 4716 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4717 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4718
e39b999a
VS
4719 lockdep_assert_held(&dev_priv->pps_mutex);
4720
81ddbc69
VS
4721 /* already initialized? */
4722 if (final->t11_t12 != 0)
4723 return;
4724
453c5420 4725 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4726 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4727 pp_on_reg = PCH_PP_ON_DELAYS;
4728 pp_off_reg = PCH_PP_OFF_DELAYS;
4729 pp_div_reg = PCH_PP_DIVISOR;
4730 } else {
bf13e81b
JN
4731 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4732
4733 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4734 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4735 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4736 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4737 }
67a54566
DV
4738
4739 /* Workaround: Need to write PP_CONTROL with the unlock key as
4740 * the very first thing. */
453c5420 4741 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4742 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4743
453c5420
JB
4744 pp_on = I915_READ(pp_on_reg);
4745 pp_off = I915_READ(pp_off_reg);
4746 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4747
4748 /* Pull timing values out of registers */
4749 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4750 PANEL_POWER_UP_DELAY_SHIFT;
4751
4752 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4753 PANEL_LIGHT_ON_DELAY_SHIFT;
4754
4755 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4756 PANEL_LIGHT_OFF_DELAY_SHIFT;
4757
4758 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4759 PANEL_POWER_DOWN_DELAY_SHIFT;
4760
4761 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4762 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4763
4764 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4765 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4766
41aa3448 4767 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4768
4769 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4770 * our hw here, which are all in 100usec. */
4771 spec.t1_t3 = 210 * 10;
4772 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4773 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4774 spec.t10 = 500 * 10;
4775 /* This one is special and actually in units of 100ms, but zero
4776 * based in the hw (so we need to add 100 ms). But the sw vbt
4777 * table multiplies it by 1000 to make it in units of 100usec,
4778 * too. */
4779 spec.t11_t12 = (510 + 100) * 10;
4780
4781 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4782 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4783
4784 /* Use the max of the register settings and vbt. If both are
4785 * unset, fall back to the spec limits. */
36b5f425 4786#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4787 spec.field : \
4788 max(cur.field, vbt.field))
4789 assign_final(t1_t3);
4790 assign_final(t8);
4791 assign_final(t9);
4792 assign_final(t10);
4793 assign_final(t11_t12);
4794#undef assign_final
4795
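	/* The combined delays are in the hw's 100usec units; convert them to
	 * milliseconds, rounding up. */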
36b5f425 4796#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4797 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4798 intel_dp->backlight_on_delay = get_delay(t8);
4799 intel_dp->backlight_off_delay = get_delay(t9);
4800 intel_dp->panel_power_down_delay = get_delay(t10);
4801 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4802#undef get_delay
4803
f30d26e4
JN
4804 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4805 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4806 intel_dp->panel_power_cycle_delay);
4807
4808 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4809 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4810}
4811
4812static void
4813intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4814 struct intel_dp *intel_dp)
f30d26e4
JN
4815{
4816 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4817 u32 pp_on, pp_off, pp_div, port_sel = 0;
4818 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4819 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4820 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4821 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4822
e39b999a 4823 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4824
4825 if (HAS_PCH_SPLIT(dev)) {
4826 pp_on_reg = PCH_PP_ON_DELAYS;
4827 pp_off_reg = PCH_PP_OFF_DELAYS;
4828 pp_div_reg = PCH_PP_DIVISOR;
4829 } else {
bf13e81b
JN
4830 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4831
4832 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4833 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4834 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4835 }
4836
b2f19d1a
PZ
4837 /*
4838 * And finally store the new values in the power sequencer. The
4839 * backlight delays are set to 1 because we do manual waits on them. For
4840 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4841 * we'll end up waiting for the backlight off delay twice: once when we
4842 * do the manual sleep, and once when we disable the panel and wait for
4843 * the PP_STATUS bit to become zero.
4844 */
f30d26e4 4845 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4846 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4847 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4848 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4849 /* Compute the divisor for the pp clock, simply match the Bspec
4850 * formula. */
453c5420 4851 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4852 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4853 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4854
4855 /* Haswell doesn't have any port selection bits for the panel
4856 * power sequencer any more. */
bc7d38a4 4857 if (IS_VALLEYVIEW(dev)) {
ad933b56 4858 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4859 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4860 if (port == PORT_A)
a24c144c 4861 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4862 else
a24c144c 4863 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4864 }
4865
453c5420
JB
4866 pp_on |= port_sel;
4867
4868 I915_WRITE(pp_on_reg, pp_on);
4869 I915_WRITE(pp_off_reg, pp_off);
4870 I915_WRITE(pp_div_reg, pp_div);
67a54566 4871
67a54566 4872 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4873 I915_READ(pp_on_reg),
4874 I915_READ(pp_off_reg),
4875 I915_READ(pp_div_reg));
f684960e
CW
4876}
4877
b33a2815
VK
4878/**
4879 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4880 * @dev: DRM device
4881 * @refresh_rate: RR to be programmed
4882 *
 4883 * This function gets called when the refresh rate (RR) has to be changed from
 4884 * one frequency to another. The switch can be between the high and low RR
 4885 * supported by the panel, or to any other RR based on media playback (in
 4886 * this case the RR value needs to be passed from user space).
4887 *
4888 * The caller of this function needs to take a lock on dev_priv->drrs.
4889 */
96178eeb 4890static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4891{
4892 struct drm_i915_private *dev_priv = dev->dev_private;
4893 struct intel_encoder *encoder;
96178eeb
VK
4894 struct intel_digital_port *dig_port = NULL;
4895 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4896 struct intel_crtc_state *config = NULL;
439d7ac0 4897 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4898 u32 reg, val;
96178eeb 4899 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4900
4901 if (refresh_rate <= 0) {
4902 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4903 return;
4904 }
4905
96178eeb
VK
4906 if (intel_dp == NULL) {
4907 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4908 return;
4909 }
4910
1fcc9d1c 4911 /*
e4d59f6b
RV
4912 * FIXME: This needs proper synchronization with psr state for some
4913 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4914 */
439d7ac0 4915
96178eeb
VK
4916 dig_port = dp_to_dig_port(intel_dp);
4917 encoder = &dig_port->base;
439d7ac0
PB
4918 intel_crtc = encoder->new_crtc;
4919
4920 if (!intel_crtc) {
4921 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4922 return;
4923 }
4924
6e3c9717 4925 config = intel_crtc->config;
439d7ac0 4926
96178eeb 4927 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4928 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4929 return;
4930 }
4931
96178eeb
VK
4932 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4933 refresh_rate)
439d7ac0
PB
4934 index = DRRS_LOW_RR;
4935
96178eeb 4936 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4937 DRM_DEBUG_KMS(
4938 "DRRS requested for previously set RR...ignoring\n");
4939 return;
4940 }
4941
4942 if (!intel_crtc->active) {
4943 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4944 return;
4945 }
4946
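	/* Gen8+ (except CHV) switch the refresh rate by selecting between the
	 * M1/N1 and M2/N2 link values; CHV and gen7 toggle the PIPECONF eDP
	 * RR mode switch bit instead. */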
44395bfe 4947 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4948 switch (index) {
4949 case DRRS_HIGH_RR:
4950 intel_dp_set_m_n(intel_crtc, M1_N1);
4951 break;
4952 case DRRS_LOW_RR:
4953 intel_dp_set_m_n(intel_crtc, M2_N2);
4954 break;
4955 case DRRS_MAX_RR:
4956 default:
 4957			DRM_ERROR("Unsupported refresh rate type\n");
4958 }
4959 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4960 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4961 val = I915_READ(reg);
a4c30b1d 4962
439d7ac0 4963 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4964 if (IS_VALLEYVIEW(dev))
4965 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4966 else
4967 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4968 } else {
6fa7aec1
VK
4969 if (IS_VALLEYVIEW(dev))
4970 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4971 else
4972 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
4973 }
4974 I915_WRITE(reg, val);
4975 }
4976
4e9ac947
VK
4977 dev_priv->drrs.refresh_rate_type = index;
4978
4979 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4980}
4981
b33a2815
VK
4982/**
4983 * intel_edp_drrs_enable - init drrs struct if supported
4984 * @intel_dp: DP struct
4985 *
4986 * Initializes frontbuffer_bits and drrs.dp
4987 */
c395578e
VK
4988void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4989{
4990 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4991 struct drm_i915_private *dev_priv = dev->dev_private;
4992 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4993 struct drm_crtc *crtc = dig_port->base.base.crtc;
4994 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4995
4996 if (!intel_crtc->config->has_drrs) {
4997 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
4998 return;
4999 }
5000
5001 mutex_lock(&dev_priv->drrs.mutex);
5002 if (WARN_ON(dev_priv->drrs.dp)) {
5003 DRM_ERROR("DRRS already enabled\n");
5004 goto unlock;
5005 }
5006
5007 dev_priv->drrs.busy_frontbuffer_bits = 0;
5008
5009 dev_priv->drrs.dp = intel_dp;
5010
5011unlock:
5012 mutex_unlock(&dev_priv->drrs.mutex);
5013}
5014
b33a2815
VK
5015/**
5016 * intel_edp_drrs_disable - Disable DRRS
5017 * @intel_dp: DP struct
5018 *
5019 */
c395578e
VK
5020void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5021{
5022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5023 struct drm_i915_private *dev_priv = dev->dev_private;
5024 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5025 struct drm_crtc *crtc = dig_port->base.base.crtc;
5026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5027
5028 if (!intel_crtc->config->has_drrs)
5029 return;
5030
5031 mutex_lock(&dev_priv->drrs.mutex);
5032 if (!dev_priv->drrs.dp) {
5033 mutex_unlock(&dev_priv->drrs.mutex);
5034 return;
5035 }
5036
5037 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5038 intel_dp_set_drrs_state(dev_priv->dev,
5039 intel_dp->attached_connector->panel.
5040 fixed_mode->vrefresh);
5041
5042 dev_priv->drrs.dp = NULL;
5043 mutex_unlock(&dev_priv->drrs.mutex);
5044
5045 cancel_delayed_work_sync(&dev_priv->drrs.work);
5046}
5047
4e9ac947
VK
5048static void intel_edp_drrs_downclock_work(struct work_struct *work)
5049{
5050 struct drm_i915_private *dev_priv =
5051 container_of(work, typeof(*dev_priv), drrs.work.work);
5052 struct intel_dp *intel_dp;
5053
5054 mutex_lock(&dev_priv->drrs.mutex);
5055
5056 intel_dp = dev_priv->drrs.dp;
5057
5058 if (!intel_dp)
5059 goto unlock;
5060
439d7ac0 5061 /*
4e9ac947
VK
 5062	 * The delayed work can race with an invalidate, hence we need to
 5063	 * recheck.
439d7ac0
PB
5064 */
5065
4e9ac947
VK
5066 if (dev_priv->drrs.busy_frontbuffer_bits)
5067 goto unlock;
439d7ac0 5068
4e9ac947
VK
5069 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5070 intel_dp_set_drrs_state(dev_priv->dev,
5071 intel_dp->attached_connector->panel.
5072 downclock_mode->vrefresh);
439d7ac0 5073
4e9ac947 5074unlock:
439d7ac0 5075
4e9ac947 5076 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5077}
5078
b33a2815
VK
5079/**
5080 * intel_edp_drrs_invalidate - Invalidate DRRS
5081 * @dev: DRM device
5082 * @frontbuffer_bits: frontbuffer plane tracking bits
5083 *
 5084 * When there is a disturbance on screen (due to cursor movement/time
 5085 * update etc.), DRRS needs to be invalidated, i.e. we need to switch to
 5086 * the high RR.
5087 *
5088 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5089 */
a93fad0f
VK
5090void intel_edp_drrs_invalidate(struct drm_device *dev,
5091 unsigned frontbuffer_bits)
5092{
5093 struct drm_i915_private *dev_priv = dev->dev_private;
5094 struct drm_crtc *crtc;
5095 enum pipe pipe;
5096
5097 if (!dev_priv->drrs.dp)
5098 return;
5099
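	/*
	 * Cancel the pending downclock work before taking drrs.mutex; the
	 * work item itself takes the mutex, so cancelling while holding it
	 * could deadlock.
	 */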
3954e733
R
5100 cancel_delayed_work_sync(&dev_priv->drrs.work);
5101
a93fad0f
VK
5102 mutex_lock(&dev_priv->drrs.mutex);
5103 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5104 pipe = to_intel_crtc(crtc)->pipe;
5105
5106 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5107 intel_dp_set_drrs_state(dev_priv->dev,
5108 dev_priv->drrs.dp->attached_connector->panel.
5109 fixed_mode->vrefresh);
5110 }
5111
5112 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5113
5114 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5115 mutex_unlock(&dev_priv->drrs.mutex);
5116}
5117
b33a2815
VK
5118/**
5119 * intel_edp_drrs_flush - Flush DRRS
5120 * @dev: DRM device
5121 * @frontbuffer_bits: frontbuffer plane tracking bits
5122 *
 5123 * When there is no movement on screen, the DRRS work can be scheduled.
 5124 * This work is responsible for programming the relevant registers after a
 5125 * timeout of 1 second.
5126 *
5127 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5128 */
a93fad0f
VK
5129void intel_edp_drrs_flush(struct drm_device *dev,
5130 unsigned frontbuffer_bits)
5131{
5132 struct drm_i915_private *dev_priv = dev->dev_private;
5133 struct drm_crtc *crtc;
5134 enum pipe pipe;
5135
5136 if (!dev_priv->drrs.dp)
5137 return;
5138
3954e733
R
5139 cancel_delayed_work_sync(&dev_priv->drrs.work);
5140
a93fad0f
VK
5141 mutex_lock(&dev_priv->drrs.mutex);
5142 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5143 pipe = to_intel_crtc(crtc)->pipe;
5144 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5145
a93fad0f
VK
5146 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5147 !dev_priv->drrs.busy_frontbuffer_bits)
5148 schedule_delayed_work(&dev_priv->drrs.work,
5149 msecs_to_jiffies(1000));
5150 mutex_unlock(&dev_priv->drrs.mutex);
5151}
5152
b33a2815
VK
5153/**
5154 * DOC: Display Refresh Rate Switching (DRRS)
5155 *
5156 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5157 * which enables switching between low and high refresh rates
 5158 * dynamically, based on the usage scenario. This feature is applicable
5159 * for internal panels.
5160 *
5161 * Indication that the panel supports DRRS is given by the panel EDID, which
5162 * would list multiple refresh rates for one resolution.
5163 *
 5164 * DRRS is of 2 types - static and seamless.
 5165 * Static DRRS involves changing the refresh rate (RR) by doing a full
 5166 * modeset (may appear as a blink on screen) and is used in the dock/undock
 5167 * scenario. Seamless DRRS involves changing the RR without any visible
 5168 * effect to the user and can be used during normal system usage. This is
 5169 * done by reprogramming the pipe M/N values or the PIPECONF RR switch bit.
5170 *
5171 * Support for static/seamless DRRS may be indicated in the VBT based on
5172 * inputs from the panel spec.
5173 *
5174 * DRRS saves power by switching to low RR based on usage scenarios.
5175 *
5176 * eDP DRRS:-
 5177 * The implementation is based on the frontbuffer tracking infrastructure.
5178 * When there is a disturbance on the screen triggered by user activity or a
5179 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5180 * When there is no movement on screen, after a timeout of 1 second, a switch
5181 * to low RR is made.
5182 * For integration with frontbuffer tracking code,
5183 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5184 *
5185 * DRRS can be further extended to support other internal panels and also
5186 * the scenario of video playback wherein RR is set based on the rate
5187 * requested by userspace.
5188 */
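
/*
 * A minimal illustration (not compiled) of how the frontbuffer tracking
 * code is expected to drive eDP DRRS; the exact call sites live in the
 * frontbuffer tracking implementation:
 *
 *	// screen dirtied by user/system activity -> force high RR
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	// ...rendering/flip completes...
 *	// flush re-arms the 1 second downclock work once no bits are busy
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */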
5189
5190/**
5191 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5192 * @intel_connector: eDP connector
5193 * @fixed_mode: preferred mode of panel
5194 *
 5195 * This function is called only once at driver load to initialize the
 5196 * basic DRRS state: the delayed work item and the mutex.
5197 *
5198 * Returns:
 5199 * Downclock mode if the panel supports it, otherwise NULL.
 5200 * DRRS support is determined by the presence of a downclock mode (apart
 5201 * from the VBT setting).
5202 */
4f9db5b5 5203static struct drm_display_mode *
96178eeb
VK
5204intel_dp_drrs_init(struct intel_connector *intel_connector,
5205 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5206{
5207 struct drm_connector *connector = &intel_connector->base;
96178eeb 5208 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5209 struct drm_i915_private *dev_priv = dev->dev_private;
5210 struct drm_display_mode *downclock_mode = NULL;
5211
5212 if (INTEL_INFO(dev)->gen <= 6) {
 5213		DRM_DEBUG_KMS("DRRS only supported for Gen7 and above\n");
5214 return NULL;
5215 }
5216
5217 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5218 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5219 return NULL;
5220 }
5221
5222 downclock_mode = intel_find_panel_downclock
5223 (dev, fixed_mode, connector);
5224
5225 if (!downclock_mode) {
a1d26342 5226		DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
4f9db5b5
PB
5227 return NULL;
5228 }
5229
4e9ac947
VK
5230 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5231
96178eeb 5232 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5233
96178eeb 5234 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5235
96178eeb 5236 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5237 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5238 return downclock_mode;
5239}
5240
ed92f0b2 5241static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5242 struct intel_connector *intel_connector)
ed92f0b2
PZ
5243{
5244 struct drm_connector *connector = &intel_connector->base;
5245 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5246 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5247 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5248 struct drm_i915_private *dev_priv = dev->dev_private;
5249 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5250 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5251 bool has_dpcd;
5252 struct drm_display_mode *scan;
5253 struct edid *edid;
6517d273 5254 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5255
96178eeb 5256 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5257
ed92f0b2
PZ
5258 if (!is_edp(intel_dp))
5259 return true;
5260
49e6bc51
VS
5261 pps_lock(intel_dp);
5262 intel_edp_panel_vdd_sanitize(intel_dp);
5263 pps_unlock(intel_dp);
63635217 5264
ed92f0b2 5265 /* Cache DPCD and EDID for edp. */
ed92f0b2 5266 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5267
5268 if (has_dpcd) {
5269 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5270 dev_priv->no_aux_handshake =
5271 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5272 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5273 } else {
5274 /* if this fails, presume the device is a ghost */
5275 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5276 return false;
5277 }
5278
5279 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5280 pps_lock(intel_dp);
36b5f425 5281 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5282 pps_unlock(intel_dp);
ed92f0b2 5283
060c8778 5284 mutex_lock(&dev->mode_config.mutex);
0b99836f 5285 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5286 if (edid) {
5287 if (drm_add_edid_modes(connector, edid)) {
5288 drm_mode_connector_update_edid_property(connector,
5289 edid);
5290 drm_edid_to_eld(connector, edid);
5291 } else {
5292 kfree(edid);
5293 edid = ERR_PTR(-EINVAL);
5294 }
5295 } else {
5296 edid = ERR_PTR(-ENOENT);
5297 }
5298 intel_connector->edid = edid;
5299
5300 /* prefer fixed mode from EDID if available */
5301 list_for_each_entry(scan, &connector->probed_modes, head) {
5302 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5303 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5304 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5305 intel_connector, fixed_mode);
ed92f0b2
PZ
5306 break;
5307 }
5308 }
5309
5310 /* fallback to VBT if available for eDP */
5311 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5312 fixed_mode = drm_mode_duplicate(dev,
5313 dev_priv->vbt.lfp_lvds_vbt_mode);
5314 if (fixed_mode)
5315 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5316 }
060c8778 5317 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5318
01527b31
CT
5319 if (IS_VALLEYVIEW(dev)) {
5320 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5321 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5322
5323 /*
5324 * Figure out the current pipe for the initial backlight setup.
5325 * If the current pipe isn't valid, try the PPS pipe, and if that
5326 * fails just assume pipe A.
5327 */
5328 if (IS_CHERRYVIEW(dev))
5329 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5330 else
5331 pipe = PORT_TO_PIPE(intel_dp->DP);
5332
5333 if (pipe != PIPE_A && pipe != PIPE_B)
5334 pipe = intel_dp->pps_pipe;
5335
5336 if (pipe != PIPE_A && pipe != PIPE_B)
5337 pipe = PIPE_A;
5338
5339 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5340 pipe_name(pipe));
01527b31
CT
5341 }
5342
4f9db5b5 5343 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5344 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5345 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5346
5347 return true;
5348}
5349
16c25533 5350bool
f0fec3f2
PZ
5351intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5352 struct intel_connector *intel_connector)
a4fc5ed6 5353{
f0fec3f2
PZ
5354 struct drm_connector *connector = &intel_connector->base;
5355 struct intel_dp *intel_dp = &intel_dig_port->dp;
5356 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5357 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5358 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5359 enum port port = intel_dig_port->port;
0b99836f 5360 int type;
a4fc5ed6 5361
a4a5d2f8
VS
5362 intel_dp->pps_pipe = INVALID_PIPE;
5363
ec5b01dd 5364 /* intel_dp vfuncs */
b6b5e383
DL
5365 if (INTEL_INFO(dev)->gen >= 9)
5366 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5367 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5368 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5369 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5370 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5371 else if (HAS_PCH_SPLIT(dev))
5372 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5373 else
5374 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5375
b9ca5fad
DL
5376 if (INTEL_INFO(dev)->gen >= 9)
5377 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5378 else
5379 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5380
0767935e
DV
5381 /* Preserve the current hw state. */
5382 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5383 intel_dp->attached_connector = intel_connector;
3d3dc149 5384
3b32a35b 5385 if (intel_dp_is_edp(dev, port))
b329530c 5386 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5387 else
5388 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5389
f7d24902
ID
5390 /*
5391 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5392 * for DP the encoder type can be set by the caller to
5393 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5394 */
5395 if (type == DRM_MODE_CONNECTOR_eDP)
5396 intel_encoder->type = INTEL_OUTPUT_EDP;
5397
c17ed5b5
VS
5398 /* eDP only on port B and/or C on vlv/chv */
5399 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5400 port != PORT_B && port != PORT_C))
5401 return false;
5402
e7281eab
ID
5403 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5404 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5405 port_name(port));
5406
b329530c 5407 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5408 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5409
a4fc5ed6
KP
5410 connector->interlace_allowed = true;
5411 connector->doublescan_allowed = 0;
5412
f0fec3f2 5413 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5414 edp_panel_vdd_work);
a4fc5ed6 5415
df0e9248 5416 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5417 drm_connector_register(connector);
a4fc5ed6 5418
affa9354 5419 if (HAS_DDI(dev))
bcbc889b
PZ
5420 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5421 else
5422 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5423 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5424
0b99836f 5425 /* Set up the hotplug pin. */
ab9d7c30
PZ
5426 switch (port) {
5427 case PORT_A:
1d843f9d 5428 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5429 break;
5430 case PORT_B:
1d843f9d 5431 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5432 break;
5433 case PORT_C:
1d843f9d 5434 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5435 break;
5436 case PORT_D:
1d843f9d 5437 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5438 break;
5439 default:
ad1c0b19 5440 BUG();
5eb08b69
ZW
5441 }
5442
dada1a9f 5443 if (is_edp(intel_dp)) {
773538e8 5444 pps_lock(intel_dp);
1e74a324
VS
5445 intel_dp_init_panel_power_timestamps(intel_dp);
5446 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5447 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5448 else
36b5f425 5449 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5450 pps_unlock(intel_dp);
dada1a9f 5451 }
0095e6dc 5452
9d1a1031 5453 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5454
0e32b39c 5455 /* init MST on ports that can support it */
c86ea3d0 5456 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5457 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5458 intel_dp_mst_encoder_init(intel_dig_port,
5459 intel_connector->base.base.id);
0e32b39c
DA
5460 }
5461 }
5462
36b5f425 5463 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5464 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5465 if (is_edp(intel_dp)) {
5466 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5467 /*
 5468			 * vdd might still be enabled due to the delayed vdd off.
5469 * Make sure vdd is actually turned off here.
5470 */
773538e8 5471 pps_lock(intel_dp);
4be73780 5472 edp_panel_vdd_off_sync(intel_dp);
773538e8 5473 pps_unlock(intel_dp);
15b1d171 5474 }
34ea3d38 5475 drm_connector_unregister(connector);
b2f246a8 5476 drm_connector_cleanup(connector);
16c25533 5477 return false;
b2f246a8 5478 }
32f9d658 5479
f684960e
CW
5480 intel_dp_add_properties(intel_dp, connector);
5481
a4fc5ed6
KP
5482 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5483 * 0xd. Failure to do so will result in spurious interrupts being
5484 * generated on the port when a cable is not attached.
5485 */
5486 if (IS_G4X(dev) && !IS_GM45(dev)) {
5487 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5488 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5489 }
16c25533
PZ
5490
5491 return true;
a4fc5ed6 5492}
f0fec3f2
PZ
5493
5494void
5495intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5496{
13cf5504 5497 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5498 struct intel_digital_port *intel_dig_port;
5499 struct intel_encoder *intel_encoder;
5500 struct drm_encoder *encoder;
5501 struct intel_connector *intel_connector;
5502
b14c5679 5503 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5504 if (!intel_dig_port)
5505 return;
5506
b14c5679 5507 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5508 if (!intel_connector) {
5509 kfree(intel_dig_port);
5510 return;
5511 }
5512
5513 intel_encoder = &intel_dig_port->base;
5514 encoder = &intel_encoder->base;
5515
5516 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5517 DRM_MODE_ENCODER_TMDS);
5518
5bfe2ac0 5519 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5520 intel_encoder->disable = intel_disable_dp;
00c09d70 5521 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5522 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5523 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5524 if (IS_CHERRYVIEW(dev)) {
9197c88b 5525 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5526 intel_encoder->pre_enable = chv_pre_enable_dp;
5527 intel_encoder->enable = vlv_enable_dp;
580d3811 5528 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5529 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5530 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5531 intel_encoder->pre_enable = vlv_pre_enable_dp;
5532 intel_encoder->enable = vlv_enable_dp;
49277c31 5533 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5534 } else {
ecff4f3b
JN
5535 intel_encoder->pre_enable = g4x_pre_enable_dp;
5536 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5537 if (INTEL_INFO(dev)->gen >= 5)
5538 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5539 }
f0fec3f2 5540
174edf1f 5541 intel_dig_port->port = port;
f0fec3f2
PZ
5542 intel_dig_port->dp.output_reg = output_reg;
5543
00c09d70 5544 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5545 if (IS_CHERRYVIEW(dev)) {
5546 if (port == PORT_D)
5547 intel_encoder->crtc_mask = 1 << 2;
5548 else
5549 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5550 } else {
5551 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5552 }
bc079e8b 5553 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5554 intel_encoder->hot_plug = intel_dp_hot_plug;
5555
13cf5504
DA
5556 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5557 dev_priv->hpd_irq_port[port] = intel_dig_port;
5558
15b1d171
PZ
5559 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5560 drm_encoder_cleanup(encoder);
5561 kfree(intel_dig_port);
b2f246a8 5562 kfree(intel_connector);
15b1d171 5563 }
f0fec3f2 5564}
0e32b39c
DA
5565
5566void intel_dp_mst_suspend(struct drm_device *dev)
5567{
5568 struct drm_i915_private *dev_priv = dev->dev_private;
5569 int i;
5570
5571 /* disable MST */
5572 for (i = 0; i < I915_MAX_PORTS; i++) {
5573 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5574 if (!intel_dig_port)
5575 continue;
5576
5577 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5578 if (!intel_dig_port->dp.can_mst)
5579 continue;
5580 if (intel_dig_port->dp.is_mst)
5581 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5582 }
5583 }
5584}
5585
5586void intel_dp_mst_resume(struct drm_device *dev)
5587{
5588 struct drm_i915_private *dev_priv = dev->dev_private;
5589 int i;
5590
5591 for (i = 0; i < I915_MAX_PORTS; i++) {
5592 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5593 if (!intel_dig_port)
5594 continue;
5595 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5596 int ret;
5597
5598 if (!intel_dig_port->dp.can_mst)
5599 continue;
5600
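			/* If the topology manager fails to resume (e.g. the MST
			 * sink changed or vanished while suspended), fall back to
			 * re-checking the MST status so the state gets cleaned up. */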
5601 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5602 if (ret != 0) {
5603 intel_dp_check_mst_status(&intel_dig_port->dp);
5604 }
5605 }
5606 }
5607}