drm/i915: Hide the source vs. sink rate handling from intel_dp_compute_config()
[linux-2.6-block.git] / drivers/gpu/drm/i915/intel_dp.c
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4, which has more link rates.
72 * Below we only provide the fixed rates and exclude the variable rates.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires programming fractional division for m2.
77 * m2 is stored in fixed point format using the formula below:
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
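
/*
 * Worked decode of the m2 values in the table above, using the
 * (m2_int << 22) | m2_fraction formula as a sanity check:
 *   (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a  (1.62 GHz entry)
 *   (27 << 22) | 0       == 0x6c00000                          (2.7/5.4 GHz entries)
 */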
a8f3ef61 87/* Skylake supports the following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 91
cfcb0fc9
JB
92/**
93 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94 * @intel_dp: DP struct
95 *
96 * If a CPU or PCH DP output is attached to an eDP panel, this function
97 * will return true, and false otherwise.
98 */
99static bool is_edp(struct intel_dp *intel_dp)
100{
da63a9f2
PZ
101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
104}
105
68b4d824 106static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 107{
68b4d824
ID
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
111}
112
df0e9248
CW
113static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114{
fa90ecef 115 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
116}
117
ea5b213a 118static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 119static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 120static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 121static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
122static void vlv_steal_power_sequencer(struct drm_device *dev,
123 enum pipe pipe);
a4fc5ed6 124
0e32b39c 125int
ea5b213a 126intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 127{
7183dc29 128 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
129
130 switch (max_link_bw) {
131 case DP_LINK_BW_1_62:
132 case DP_LINK_BW_2_7:
1db10e28 133 case DP_LINK_BW_5_4:
d4eead50 134 break;
a4fc5ed6 135 default:
d4eead50
ID
136 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137 max_link_bw);
a4fc5ed6
KP
138 max_link_bw = DP_LINK_BW_1_62;
139 break;
140 }
141 return max_link_bw;
142}
143
eeb6324d
PZ
144static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145{
146 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147 struct drm_device *dev = intel_dig_port->base.base.dev;
148 u8 source_max, sink_max;
149
150 source_max = 4;
151 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153 source_max = 2;
154
155 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157 return min(source_max, sink_max);
158}
159
cd9dde44
AJ
160/*
161 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec.
163 *
164 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165 *
166 * 270000 * 1 * 8 / 10 == 216000
167 *
168 * The actual data capacity of that configuration is 2.16Gbit/s, so the
169 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
170 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171 * 119000. At 18bpp that's 2142000 kilobits per second.
172 *
173 * Thus the strange-looking division by 10 in intel_dp_link_required, to
174 * get the result in decakilobits instead of kilobits.
175 */
176
a4fc5ed6 177static int
c898261c 178intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 179{
cd9dde44 180 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
181}
182
fe27d53e
DA
183static int
184intel_dp_max_data_rate(int max_link_clock, int max_lanes)
185{
186 return (max_link_clock * max_lanes * 8) / 10;
187}
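
/*
 * Putting the two helpers together with the example from the comment above:
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200,
 * while a single 2.7GHz lane gives intel_dp_max_data_rate(270000, 1)
 * == 270000 * 1 * 8 / 10 == 216000, so that mode just fits on one lane.
 */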
188
c19de8eb 189static enum drm_mode_status
a4fc5ed6
KP
190intel_dp_mode_valid(struct drm_connector *connector,
191 struct drm_display_mode *mode)
192{
df0e9248 193 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
194 struct intel_connector *intel_connector = to_intel_connector(connector);
195 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
196 int target_clock = mode->clock;
197 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 198
dd06f90e
JN
199 if (is_edp(intel_dp) && fixed_mode) {
200 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
201 return MODE_PANEL;
202
dd06f90e 203 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 204 return MODE_PANEL;
03afc4a2
DV
205
206 target_clock = fixed_mode->clock;
7de56f43
ZY
207 }
208
36008365 209 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
eeb6324d 210 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
211
212 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213 mode_rate = intel_dp_link_required(target_clock, 18);
214
215 if (mode_rate > max_rate)
c4867936 216 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
217
218 if (mode->clock < 10000)
219 return MODE_CLOCK_LOW;
220
0af78a2b
DV
221 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222 return MODE_H_ILLEGAL;
223
a4fc5ed6
KP
224 return MODE_OK;
225}
226
a4f1289e 227uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
228{
229 int i;
230 uint32_t v = 0;
231
232 if (src_bytes > 4)
233 src_bytes = 4;
234 for (i = 0; i < src_bytes; i++)
235 v |= ((uint32_t) src[i]) << ((3-i) * 8);
236 return v;
237}
238
c2af70e2 239static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
240{
241 int i;
242 if (dst_bytes > 4)
243 dst_bytes = 4;
244 for (i = 0; i < dst_bytes; i++)
245 dst[i] = src >> ((3-i) * 8);
246}
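
/*
 * Example of the (un)packing convention used by the two helpers above:
 * intel_dp_pack_aux() of the bytes 0x12, 0x34, 0x56 returns 0x12345600,
 * i.e. the bytes land big-endian in the 32-bit word, and
 * intel_dp_unpack_aux(0x12345600, dst, 3) writes 0x12, 0x34, 0x56 back out.
 */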
247
fb0f8fbf
KP
248/* hrawclock is 1/4 the FSB frequency */
249static int
250intel_hrawclk(struct drm_device *dev)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t clkcfg;
254
9473c8f4
VP
255 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256 if (IS_VALLEYVIEW(dev))
257 return 200;
258
fb0f8fbf
KP
259 clkcfg = I915_READ(CLKCFG);
260 switch (clkcfg & CLKCFG_FSB_MASK) {
261 case CLKCFG_FSB_400:
262 return 100;
263 case CLKCFG_FSB_533:
264 return 133;
265 case CLKCFG_FSB_667:
266 return 166;
267 case CLKCFG_FSB_800:
268 return 200;
269 case CLKCFG_FSB_1067:
270 return 266;
271 case CLKCFG_FSB_1333:
272 return 333;
273 /* these two are just a guess; one of them might be right */
274 case CLKCFG_FSB_1600:
275 case CLKCFG_FSB_1600_ALT:
276 return 400;
277 default:
278 return 133;
279 }
280}
281
bf13e81b
JN
282static void
283intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 284 struct intel_dp *intel_dp);
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b 288
773538e8
VS
289static void pps_lock(struct intel_dp *intel_dp)
290{
291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
292 struct intel_encoder *encoder = &intel_dig_port->base;
293 struct drm_device *dev = encoder->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 enum intel_display_power_domain power_domain;
296
297 /*
298 * See vlv_power_sequencer_reset() for why we need
299 * a power domain reference here.
300 */
301 power_domain = intel_display_port_power_domain(encoder);
302 intel_display_power_get(dev_priv, power_domain);
303
304 mutex_lock(&dev_priv->pps_mutex);
305}
306
307static void pps_unlock(struct intel_dp *intel_dp)
308{
309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
310 struct intel_encoder *encoder = &intel_dig_port->base;
311 struct drm_device *dev = encoder->base.dev;
312 struct drm_i915_private *dev_priv = dev->dev_private;
313 enum intel_display_power_domain power_domain;
314
315 mutex_unlock(&dev_priv->pps_mutex);
316
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_put(dev_priv, power_domain);
319}
320
961a0db0
VS
321static void
322vlv_power_sequencer_kick(struct intel_dp *intel_dp)
323{
324 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
325 struct drm_device *dev = intel_dig_port->base.base.dev;
326 struct drm_i915_private *dev_priv = dev->dev_private;
327 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 328 bool pll_enabled;
961a0db0
VS
329 uint32_t DP;
330
331 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
332 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
333 pipe_name(pipe), port_name(intel_dig_port->port)))
334 return;
335
336 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
337 pipe_name(pipe), port_name(intel_dig_port->port));
338
339 /* Preserve the BIOS-computed detected bit. This is
340 * supposed to be read-only.
341 */
342 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
343 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
344 DP |= DP_PORT_WIDTH(1);
345 DP |= DP_LINK_TRAIN_PAT_1;
346
347 if (IS_CHERRYVIEW(dev))
348 DP |= DP_PIPE_SELECT_CHV(pipe);
349 else if (pipe == PIPE_B)
350 DP |= DP_PIPEB_SELECT;
351
d288f65f
VS
352 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
353
354 /*
355 * The DPLL for the pipe must be enabled for this to work.
356 * So enable it temporarily if it's not already enabled.
357 */
358 if (!pll_enabled)
359 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
360 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
361
961a0db0
VS
362 /*
363 * Similar magic as in intel_dp_enable_port().
364 * We _must_ do this port enable + disable trick
365 * to make this power sequencer lock onto the port.
366 * Otherwise even VDD force bit won't work.
367 */
368 I915_WRITE(intel_dp->output_reg, DP);
369 POSTING_READ(intel_dp->output_reg);
370
371 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
376
377 if (!pll_enabled)
378 vlv_force_pll_off(dev, pipe);
961a0db0
VS
379}
380
bf13e81b
JN
381static enum pipe
382vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
383{
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
385 struct drm_device *dev = intel_dig_port->base.base.dev;
386 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
387 struct intel_encoder *encoder;
388 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 389 enum pipe pipe;
bf13e81b 390
e39b999a 391 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 392
a8c3344e
VS
393 /* We should never land here with regular DP ports */
394 WARN_ON(!is_edp(intel_dp));
395
a4a5d2f8
VS
396 if (intel_dp->pps_pipe != INVALID_PIPE)
397 return intel_dp->pps_pipe;
398
399 /*
400 * We don't have a power sequencer currently.
401 * Pick one that's not used by other ports.
402 */
403 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
404 base.head) {
405 struct intel_dp *tmp;
406
407 if (encoder->type != INTEL_OUTPUT_EDP)
408 continue;
409
410 tmp = enc_to_intel_dp(&encoder->base);
411
412 if (tmp->pps_pipe != INVALID_PIPE)
413 pipes &= ~(1 << tmp->pps_pipe);
414 }
415
416 /*
417 * Didn't find one. This should not happen since there
418 * are two power sequencers and up to two eDP ports.
419 */
420 if (WARN_ON(pipes == 0))
a8c3344e
VS
421 pipe = PIPE_A;
422 else
423 pipe = ffs(pipes) - 1;
a4a5d2f8 424
a8c3344e
VS
425 vlv_steal_power_sequencer(dev, pipe);
426 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
427
428 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
429 pipe_name(intel_dp->pps_pipe),
430 port_name(intel_dig_port->port));
431
432 /* init power sequencer on this pipe and port */
36b5f425
VS
433 intel_dp_init_panel_power_sequencer(dev, intel_dp);
434 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 435
961a0db0
VS
436 /*
437 * Even vdd force doesn't work until we've made
438 * the power sequencer lock in on the port.
439 */
440 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
441
442 return intel_dp->pps_pipe;
443}
444
6491ab27
VS
445typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446 enum pipe pipe);
447
448static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452}
453
454static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455 enum pipe pipe)
456{
457 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458}
459
460static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
461 enum pipe pipe)
462{
463 return true;
464}
bf13e81b 465
a4a5d2f8 466static enum pipe
6491ab27
VS
467vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468 enum port port,
469 vlv_pipe_check pipe_check)
a4a5d2f8
VS
470{
471 enum pipe pipe;
bf13e81b 472
bf13e81b
JN
473 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
476
477 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478 continue;
479
6491ab27
VS
480 if (!pipe_check(dev_priv, pipe))
481 continue;
482
a4a5d2f8 483 return pipe;
bf13e81b
JN
484 }
485
a4a5d2f8
VS
486 return INVALID_PIPE;
487}
488
489static void
490vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
491{
492 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
493 struct drm_device *dev = intel_dig_port->base.base.dev;
494 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
495 enum port port = intel_dig_port->port;
496
497 lockdep_assert_held(&dev_priv->pps_mutex);
498
499 /* try to find a pipe with this port selected */
6491ab27
VS
500 /* first pick one where the panel is on */
501 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
502 vlv_pipe_has_pp_on);
503 /* didn't find one? pick one where vdd is on */
504 if (intel_dp->pps_pipe == INVALID_PIPE)
505 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 vlv_pipe_has_vdd_on);
507 /* didn't find one? pick one with just the correct port */
508 if (intel_dp->pps_pipe == INVALID_PIPE)
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_any);
a4a5d2f8
VS
511
512 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
513 if (intel_dp->pps_pipe == INVALID_PIPE) {
514 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
515 port_name(port));
516 return;
bf13e81b
JN
517 }
518
a4a5d2f8
VS
519 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
520 port_name(port), pipe_name(intel_dp->pps_pipe));
521
36b5f425
VS
522 intel_dp_init_panel_power_sequencer(dev, intel_dp);
523 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
524}
525
773538e8
VS
526void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
527{
528 struct drm_device *dev = dev_priv->dev;
529 struct intel_encoder *encoder;
530
531 if (WARN_ON(!IS_VALLEYVIEW(dev)))
532 return;
533
534 /*
535 * We can't grab pps_mutex here due to deadlock with power_domain
536 * mutex when power_domain functions are called while holding pps_mutex.
537 * That also means that in order to use pps_pipe the code needs to
538 * hold both a power domain reference and pps_mutex, and the power domain
539 * reference get/put must be done while _not_ holding pps_mutex.
540 * pps_{lock,unlock}() do these steps in the correct order, so they
541 * should always be used.
542 */
543
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
545 struct intel_dp *intel_dp;
546
547 if (encoder->type != INTEL_OUTPUT_EDP)
548 continue;
549
550 intel_dp = enc_to_intel_dp(&encoder->base);
551 intel_dp->pps_pipe = INVALID_PIPE;
552 }
bf13e81b
JN
553}
554
555static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556{
557 struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559 if (HAS_PCH_SPLIT(dev))
560 return PCH_PP_CONTROL;
561 else
562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563}
564
565static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566{
567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569 if (HAS_PCH_SPLIT(dev))
570 return PCH_PP_STATUS;
571 else
572 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573}
574
01527b31
CT
575/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
576 This function is only applicable when panel PM state is not to be tracked. */
577static int edp_notify_handler(struct notifier_block *this, unsigned long code,
578 void *unused)
579{
580 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
581 edp_notifier);
582 struct drm_device *dev = intel_dp_to_dev(intel_dp);
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 u32 pp_div;
585 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
586
587 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 return 0;
589
773538e8 590 pps_lock(intel_dp);
e39b999a 591
01527b31 592 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
593 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
594
01527b31
CT
595 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
596 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
597 pp_div = I915_READ(pp_div_reg);
598 pp_div &= PP_REFERENCE_DIVIDER_MASK;
599
600 /* 0x1F write to PP_DIV_REG sets max cycle delay */
601 I915_WRITE(pp_div_reg, pp_div | 0x1F);
602 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
603 msleep(intel_dp->panel_power_cycle_delay);
604 }
605
773538e8 606 pps_unlock(intel_dp);
e39b999a 607
01527b31
CT
608 return 0;
609}
610
4be73780 611static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 612{
30add22d 613 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
614 struct drm_i915_private *dev_priv = dev->dev_private;
615
e39b999a
VS
616 lockdep_assert_held(&dev_priv->pps_mutex);
617
9a42356b
VS
618 if (IS_VALLEYVIEW(dev) &&
619 intel_dp->pps_pipe == INVALID_PIPE)
620 return false;
621
bf13e81b 622 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
623}
624
4be73780 625static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 626{
30add22d 627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
628 struct drm_i915_private *dev_priv = dev->dev_private;
629
e39b999a
VS
630 lockdep_assert_held(&dev_priv->pps_mutex);
631
9a42356b
VS
632 if (IS_VALLEYVIEW(dev) &&
633 intel_dp->pps_pipe == INVALID_PIPE)
634 return false;
635
773538e8 636 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
637}
638
9b984dae
KP
639static void
640intel_dp_check_edp(struct intel_dp *intel_dp)
641{
30add22d 642 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 643 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 644
9b984dae
KP
645 if (!is_edp(intel_dp))
646 return;
453c5420 647
4be73780 648 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
649 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
651 I915_READ(_pp_stat_reg(intel_dp)),
652 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
653 }
654}
655
9ee32fea
DV
656static uint32_t
657intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
658{
659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
660 struct drm_device *dev = intel_dig_port->base.base.dev;
661 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 662 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
663 uint32_t status;
664 bool done;
665
ef04f00d 666#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 667 if (has_aux_irq)
b18ac466 668 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 669 msecs_to_jiffies_timeout(10));
9ee32fea
DV
670 else
671 done = wait_for_atomic(C, 10) == 0;
672 if (!done)
673 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
674 has_aux_irq);
675#undef C
676
677 return status;
678}
679
ec5b01dd 680static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 681{
174edf1f
PZ
682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 684
ec5b01dd
DL
685 /*
686 * The clock divider is based off the hrawclk, and we'd like it to run at
687 * 2MHz. So, take the hrawclk value, divide by 2, and use that.
a4fc5ed6 688 */
ec5b01dd
DL
689 return index ? 0 : intel_hrawclk(dev) / 2;
690}
691
692static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693{
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
696
697 if (index)
698 return 0;
699
700 if (intel_dig_port->port == PORT_A) {
701 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 702 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 703 else
b84a1cf8 704 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
705 } else {
706 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707 }
708}
709
710static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711{
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715
716 if (intel_dig_port->port == PORT_A) {
717 if (index)
718 return 0;
719 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
720 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721 /* Workaround for non-ULT HSW */
bc86625a
CW
722 switch (index) {
723 case 0: return 63;
724 case 1: return 72;
725 default: return 0;
726 }
ec5b01dd 727 } else {
bc86625a 728 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 729 }
b84a1cf8
RV
730}
731
ec5b01dd
DL
732static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733{
734 return index ? 0 : 100;
735}
736
b6b5e383
DL
737static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738{
739 /*
740 * SKL doesn't need us to program the AUX clock divider (Hardware will
741 * derive the clock from CDCLK automatically). We still implement the
742 * get_aux_clock_divider vfunc to plug into the existing code.
743 */
744 return index ? 0 : 1;
745}
746
5ed12a19
DL
747static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
748 bool has_aux_irq,
749 int send_bytes,
750 uint32_t aux_clock_divider)
751{
752 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
753 struct drm_device *dev = intel_dig_port->base.base.dev;
754 uint32_t precharge, timeout;
755
756 if (IS_GEN6(dev))
757 precharge = 3;
758 else
759 precharge = 5;
760
761 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
762 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
763 else
764 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
765
766 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 767 DP_AUX_CH_CTL_DONE |
5ed12a19 768 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 769 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 770 timeout |
788d4433 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 774 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
775}
776
b9ca5fad
DL
777static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778 bool has_aux_irq,
779 int send_bytes,
780 uint32_t unused)
781{
782 return DP_AUX_CH_CTL_SEND_BUSY |
783 DP_AUX_CH_CTL_DONE |
784 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785 DP_AUX_CH_CTL_TIME_OUT_ERROR |
786 DP_AUX_CH_CTL_TIME_OUT_1600us |
787 DP_AUX_CH_CTL_RECEIVE_ERROR |
788 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790}
791
b84a1cf8
RV
792static int
793intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 794 const uint8_t *send, int send_bytes,
b84a1cf8
RV
795 uint8_t *recv, int recv_size)
796{
797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
798 struct drm_device *dev = intel_dig_port->base.base.dev;
799 struct drm_i915_private *dev_priv = dev->dev_private;
800 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
801 uint32_t ch_data = ch_ctl + 4;
bc86625a 802 uint32_t aux_clock_divider;
b84a1cf8
RV
803 int i, ret, recv_bytes;
804 uint32_t status;
5ed12a19 805 int try, clock = 0;
4e6b788c 806 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
807 bool vdd;
808
773538e8 809 pps_lock(intel_dp);
e39b999a 810
72c3500a
VS
811 /*
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
814 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
815 * ourselves.
816 */
1e0560e0 817 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
818
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
821 * deep sleep states.
822 */
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
824
825 intel_dp_check_edp(intel_dp);
5eb08b69 826
c67a470b
PZ
827 intel_aux_display_runtime_get(dev_priv);
828
11bee43e
JB
829 /* Try to wait for any previous AUX channel activity */
830 for (try = 0; try < 3; try++) {
ef04f00d 831 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
832 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
833 break;
834 msleep(1);
835 }
836
837 if (try == 3) {
838 WARN(1, "dp_aux_ch not started status 0x%08x\n",
839 I915_READ(ch_ctl));
9ee32fea
DV
840 ret = -EBUSY;
841 goto out;
4f7f7b7e
CW
842 }
843
46a5ae9f
PZ
844 /* Only 5 data registers! */
845 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
846 ret = -E2BIG;
847 goto out;
848 }
849
ec5b01dd 850 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
851 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
852 has_aux_irq,
853 send_bytes,
854 aux_clock_divider);
5ed12a19 855
bc86625a
CW
856 /* Must try at least 3 times according to DP spec */
857 for (try = 0; try < 5; try++) {
858 /* Load the send data into the aux channel data registers */
859 for (i = 0; i < send_bytes; i += 4)
860 I915_WRITE(ch_data + i,
a4f1289e
RV
861 intel_dp_pack_aux(send + i,
862 send_bytes - i));
bc86625a
CW
863
864 /* Send the command and wait for it to complete */
5ed12a19 865 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
866
867 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
868
869 /* Clear done status and any errors */
870 I915_WRITE(ch_ctl,
871 status |
872 DP_AUX_CH_CTL_DONE |
873 DP_AUX_CH_CTL_TIME_OUT_ERROR |
874 DP_AUX_CH_CTL_RECEIVE_ERROR);
875
876 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR))
878 continue;
879 if (status & DP_AUX_CH_CTL_DONE)
880 break;
881 }
4f7f7b7e 882 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
883 break;
884 }
885
a4fc5ed6 886 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 887 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
888 ret = -EBUSY;
889 goto out;
a4fc5ed6
KP
890 }
891
892 /* Check for timeout or receive error.
893 * Timeouts occur when the sink is not connected
894 */
a5b3da54 895 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 896 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
897 ret = -EIO;
898 goto out;
a5b3da54 899 }
1ae8c0a5
KP
900
901 /* Timeouts occur when the device isn't connected, so they're
902 * "normal" -- don't fill the kernel log with these */
a5b3da54 903 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 904 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
905 ret = -ETIMEDOUT;
906 goto out;
a4fc5ed6
KP
907 }
908
909 /* Unload any bytes sent back from the other side */
910 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
911 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
912 if (recv_bytes > recv_size)
913 recv_bytes = recv_size;
0206e353 914
4f7f7b7e 915 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
916 intel_dp_unpack_aux(I915_READ(ch_data + i),
917 recv + i, recv_bytes - i);
a4fc5ed6 918
9ee32fea
DV
919 ret = recv_bytes;
920out:
921 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 922 intel_aux_display_runtime_put(dev_priv);
9ee32fea 923
884f19e9
JN
924 if (vdd)
925 edp_panel_vdd_off(intel_dp, false);
926
773538e8 927 pps_unlock(intel_dp);
e39b999a 928
9ee32fea 929 return ret;
a4fc5ed6
KP
930}
931
a6c8aff0
JN
932#define BARE_ADDRESS_SIZE 3
933#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
934static ssize_t
935intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 936{
9d1a1031
JN
937 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938 uint8_t txbuf[20], rxbuf[20];
939 size_t txsize, rxsize;
a4fc5ed6 940 int ret;
a4fc5ed6 941
9d1a1031
JN
942 txbuf[0] = msg->request << 4;
943 txbuf[1] = msg->address >> 8;
944 txbuf[2] = msg->address & 0xff;
945 txbuf[3] = msg->size - 1;
46a5ae9f 946
9d1a1031
JN
947 switch (msg->request & ~DP_AUX_I2C_MOT) {
948 case DP_AUX_NATIVE_WRITE:
949 case DP_AUX_I2C_WRITE:
a6c8aff0 950 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 951 rxsize = 1;
f51a44b9 952
9d1a1031
JN
953 if (WARN_ON(txsize > 20))
954 return -E2BIG;
a4fc5ed6 955
9d1a1031 956 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 957
9d1a1031
JN
958 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959 if (ret > 0) {
960 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 961
9d1a1031
JN
962 /* Return payload size. */
963 ret = msg->size;
964 }
965 break;
46a5ae9f 966
9d1a1031
JN
967 case DP_AUX_NATIVE_READ:
968 case DP_AUX_I2C_READ:
a6c8aff0 969 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 970 rxsize = msg->size + 1;
a4fc5ed6 971
9d1a1031
JN
972 if (WARN_ON(rxsize > 20))
973 return -E2BIG;
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
978 /*
979 * Assume happy day, and copy the data. The caller is
980 * expected to check msg->reply before touching it.
981 *
982 * Return payload size.
983 */
984 ret--;
985 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 986 }
9d1a1031
JN
987 break;
988
989 default:
990 ret = -EINVAL;
991 break;
a4fc5ed6 992 }
f51a44b9 993
9d1a1031 994 return ret;
a4fc5ed6
KP
995}
996
9d1a1031
JN
997static void
998intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
999{
1000 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1001 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1002 enum port port = intel_dig_port->port;
0b99836f 1003 const char *name = NULL;
ab2c0672
DA
1004 int ret;
1005
33ad6626
JN
1006 switch (port) {
1007 case PORT_A:
1008 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1009 name = "DPDDC-A";
ab2c0672 1010 break;
33ad6626
JN
1011 case PORT_B:
1012 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1013 name = "DPDDC-B";
ab2c0672 1014 break;
33ad6626
JN
1015 case PORT_C:
1016 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1017 name = "DPDDC-C";
ab2c0672 1018 break;
33ad6626
JN
1019 case PORT_D:
1020 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1021 name = "DPDDC-D";
33ad6626
JN
1022 break;
1023 default:
1024 BUG();
ab2c0672
DA
1025 }
1026
1b1aad75
DL
1027 /*
1028 * The AUX_CTL register is usually DP_CTL + 0x10.
1029 *
1030 * On Haswell and Broadwell though:
1031 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1032 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1033 *
1034 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1035 */
1036 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1037 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1038
0b99836f 1039 intel_dp->aux.name = name;
9d1a1031
JN
1040 intel_dp->aux.dev = dev->dev;
1041 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1042
0b99836f
JN
1043 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1044 connector->base.kdev->kobj.name);
8316f337 1045
4f71d0cb 1046 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1047 if (ret < 0) {
4f71d0cb 1048 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1049 name, ret);
1050 return;
ab2c0672 1051 }
8a5e6aeb 1052
0b99836f
JN
1053 ret = sysfs_create_link(&connector->base.kdev->kobj,
1054 &intel_dp->aux.ddc.dev.kobj,
1055 intel_dp->aux.ddc.dev.kobj.name);
1056 if (ret < 0) {
1057 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1058 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1059 }
a4fc5ed6
KP
1060}
1061
80f65de3
ID
1062static void
1063intel_dp_connector_unregister(struct intel_connector *intel_connector)
1064{
1065 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1066
0e32b39c
DA
1067 if (!intel_connector->mst_port)
1068 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1069 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1070 intel_connector_unregister(intel_connector);
1071}
1072
5416d871 1073static void
c3346ef6 1074skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1075{
1076 u32 ctrl1;
1077
1078 pipe_config->ddi_pll_sel = SKL_DPLL0;
1079 pipe_config->dpll_hw_state.cfgcr1 = 0;
1080 pipe_config->dpll_hw_state.cfgcr2 = 0;
1081
1082 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1083 switch (link_clock / 2) {
1084 case 81000:
5416d871
DL
1085 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1086 SKL_DPLL0);
1087 break;
c3346ef6 1088 case 135000:
5416d871
DL
1089 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1090 SKL_DPLL0);
1091 break;
c3346ef6 1092 case 270000:
5416d871
DL
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1094 SKL_DPLL0);
1095 break;
c3346ef6
SJ
1096 case 162000:
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1098 SKL_DPLL0);
1099 break;
1100 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640 MHz, which
1101 results in a CDCLK change. Need to handle the change of CDCLK by
1102 disabling pipes and re-enabling them. */
1103 case 108000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1105 SKL_DPLL0);
1106 break;
1107 case 216000:
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1109 SKL_DPLL0);
1110 break;
1111
5416d871
DL
1112 }
1113 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1114}
1115
0e50338c 1116static void
5cec258b 1117hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1118{
1119 switch (link_bw) {
1120 case DP_LINK_BW_1_62:
1121 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122 break;
1123 case DP_LINK_BW_2_7:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125 break;
1126 case DP_LINK_BW_5_4:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128 break;
1129 }
1130}
1131
fc0f8e25 1132static int
12f6a2e2 1133intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1134{
12f6a2e2
VS
1135 if (intel_dp->num_supported_rates) {
1136 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1137 return intel_dp->num_supported_rates;
fc0f8e25 1138 }
12f6a2e2
VS
1139
1140 *sink_rates = default_rates;
1141
1142 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1143}
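
/*
 * The (link_bw >> 3) + 1 trick used here (and in intel_dp_source_rates()
 * below) relies on the DPCD bandwidth codes: DP_LINK_BW_1_62 == 0x06,
 * DP_LINK_BW_2_7 == 0x0a and DP_LINK_BW_5_4 == 0x14, so the shift yields
 * 0, 1 or 2 and the +1 gives how many entries of default_rates[] apply.
 */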
1144
a8f3ef61 1145static int
1db10e28 1146intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1147{
636280ba
VS
1148 if (INTEL_INFO(dev)->gen >= 9) {
1149 *source_rates = gen9_rates;
1150 return ARRAY_SIZE(gen9_rates);
a8f3ef61 1151 }
636280ba
VS
1152
1153 *source_rates = default_rates;
1154
1db10e28
VS
1155 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156 /* WaDisableHBR2:skl */
1157 return (DP_LINK_BW_2_7 >> 3) + 1;
1158 else if (INTEL_INFO(dev)->gen >= 8 ||
1159 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160 return (DP_LINK_BW_5_4 >> 3) + 1;
1161 else
1162 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1163}
1164
c6bb3538
DV
1165static void
1166intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1167 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1168{
1169 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1170 const struct dp_link_dpll *divisor = NULL;
1171 int i, count = 0;
c6bb3538
DV
1172
1173 if (IS_G4X(dev)) {
9dd4ffdf
CML
1174 divisor = gen4_dpll;
1175 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1176 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1177 divisor = pch_dpll;
1178 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1179 } else if (IS_CHERRYVIEW(dev)) {
1180 divisor = chv_dpll;
1181 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1182 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1183 divisor = vlv_dpll;
1184 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1185 }
9dd4ffdf
CML
1186
1187 if (divisor && count) {
1188 for (i = 0; i < count; i++) {
1189 if (link_bw == divisor[i].link_bw) {
1190 pipe_config->dpll = divisor[i].dpll;
1191 pipe_config->clock_set = true;
1192 break;
1193 }
1194 }
c6bb3538
DV
1195 }
1196}
1197
2ecae76a
VS
1198static int intersect_rates(const int *source_rates, int source_len,
1199 const int *sink_rates, int sink_len,
1200 int *supported_rates)
a8f3ef61
SJ
1201{
1202 int i = 0, j = 0, k = 0;
1203
a8f3ef61
SJ
1204 while (i < source_len && j < sink_len) {
1205 if (source_rates[i] == sink_rates[j]) {
1206 supported_rates[k] = source_rates[i];
1207 ++k;
1208 ++i;
1209 ++j;
1210 } else if (source_rates[i] < sink_rates[j]) {
1211 ++i;
1212 } else {
1213 ++j;
1214 }
1215 }
1216 return k;
1217}
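
/*
 * intersect_rates() is a merge-style walk over two ascending arrays. For
 * example, intersecting default_rates {162000, 270000, 540000} with the
 * gen9_rates table above yields {162000, 270000, 540000} and returns 3,
 * since all three legacy rates also appear in the gen9 list.
 */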
1218
2ecae76a
VS
1219static int intel_supported_rates(struct intel_dp *intel_dp,
1220 int *supported_rates)
1221{
1222 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1223 const int *source_rates, *sink_rates;
1224 int source_len, sink_len;
1225
1226 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1227 source_len = intel_dp_source_rates(dev, &source_rates);
1228
1229 return intersect_rates(source_rates, source_len,
1230 sink_rates, sink_len,
1231 supported_rates);
1232}
1233
f4896f15 1234static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1235{
1236 int i = 0;
1237
1238 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1239 if (find == rates[i])
1240 break;
1241
1242 return i;
1243}
1244
00c09d70 1245bool
5bfe2ac0 1246intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1247 struct intel_crtc_state *pipe_config)
a4fc5ed6 1248{
5bfe2ac0 1249 struct drm_device *dev = encoder->base.dev;
36008365 1250 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1251 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1252 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1253 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1254 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1255 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1256 int lane_count, clock;
56071a20 1257 int min_lane_count = 1;
eeb6324d 1258 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1259 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1260 int min_clock = 0;
a8f3ef61 1261 int max_clock;
083f9560 1262 int bpp, mode_rate;
ff9a6750 1263 int link_avail, link_clock;
2ecae76a
VS
1264 int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
1265 int supported_len;
a8f3ef61 1266
2ecae76a 1267 supported_len = intel_supported_rates(intel_dp, supported_rates);
a8f3ef61
SJ
1268
1269 /* No common link rates between source and sink */
1270 WARN_ON(supported_len <= 0);
1271
1272 max_clock = supported_len - 1;
a4fc5ed6 1273
bc7d38a4 1274 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1275 pipe_config->has_pch_encoder = true;
1276
03afc4a2 1277 pipe_config->has_dp_encoder = true;
f769cd24 1278 pipe_config->has_drrs = false;
9ed109a7 1279 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1280
dd06f90e
JN
1281 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1282 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1283 adjusted_mode);
2dd24552
JB
1284 if (!HAS_PCH_SPLIT(dev))
1285 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1286 intel_connector->panel.fitting_mode);
1287 else
b074cec8
JB
1288 intel_pch_panel_fitting(intel_crtc, pipe_config,
1289 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1290 }
1291
cb1793ce 1292 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1293 return false;
1294
083f9560 1295 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61
SJ
1296 "max bw %d pixel clock %iKHz\n",
1297 max_lane_count, supported_rates[max_clock],
241bfc38 1298 adjusted_mode->crtc_clock);
083f9560 1299
36008365
DV
1300 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1301 * bpc in between. */
3e7ca985 1302 bpp = pipe_config->pipe_bpp;
56071a20
JN
1303 if (is_edp(intel_dp)) {
1304 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1305 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1306 dev_priv->vbt.edp_bpp);
1307 bpp = dev_priv->vbt.edp_bpp;
1308 }
1309
344c5bbc
JN
1310 /*
1311 * Use the maximum clock and number of lanes the eDP panel
1312 * advertises being capable of. The panels are generally
1313 * designed to support only a single clock and lane
1314 * configuration, and typically these values correspond to the
1315 * native resolution of the panel.
1316 */
1317 min_lane_count = max_lane_count;
1318 min_clock = max_clock;
7984211e 1319 }
657445fe 1320
36008365 1321 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1322 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1323 bpp);
36008365 1324
c6930992 1325 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1326 for (lane_count = min_lane_count;
1327 lane_count <= max_lane_count;
1328 lane_count <<= 1) {
1329
1330 link_clock = supported_rates[clock];
36008365
DV
1331 link_avail = intel_dp_max_data_rate(link_clock,
1332 lane_count);
1333
1334 if (mode_rate <= link_avail) {
1335 goto found;
1336 }
1337 }
1338 }
1339 }
c4867936 1340
36008365 1341 return false;
3685a8f3 1342
36008365 1343found:
55bc60db
VS
1344 if (intel_dp->color_range_auto) {
1345 /*
1346 * See:
1347 * CEA-861-E - 5.1 Default Encoding Parameters
1348 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1349 */
18316c8c 1350 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1351 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1352 else
1353 intel_dp->color_range = 0;
1354 }
1355
3685a8f3 1356 if (intel_dp->color_range)
50f3b016 1357 pipe_config->limited_color_range = true;
a4fc5ed6 1358
36008365 1359 intel_dp->lane_count = lane_count;
a8f3ef61
SJ
1360
1361 intel_dp->link_bw =
1362 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1363
1364 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
1365 intel_dp->rate_select =
2ecae76a
VS
1366 rate_to_index(supported_rates[clock],
1367 intel_dp->supported_rates);
a8f3ef61
SJ
1368 intel_dp->link_bw = 0;
1369 }
1370
657445fe 1371 pipe_config->pipe_bpp = bpp;
a8f3ef61 1372 pipe_config->port_clock = supported_rates[clock];
a4fc5ed6 1373
36008365
DV
1374 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1375 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1376 pipe_config->port_clock, bpp);
36008365
DV
1377 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1378 mode_rate, link_avail);
a4fc5ed6 1379
03afc4a2 1380 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1381 adjusted_mode->crtc_clock,
1382 pipe_config->port_clock,
03afc4a2 1383 &pipe_config->dp_m_n);
9d1a455b 1384
439d7ac0 1385 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1386 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1387 pipe_config->has_drrs = true;
439d7ac0
PB
1388 intel_link_compute_m_n(bpp, lane_count,
1389 intel_connector->panel.downclock_mode->clock,
1390 pipe_config->port_clock,
1391 &pipe_config->dp_m2_n2);
1392 }
1393
5416d871 1394 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
c3346ef6 1395 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
5416d871 1396 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1397 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1398 else
1399 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1400
03afc4a2 1401 return true;
a4fc5ed6
KP
1402}
1403
7c62a164 1404static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1405{
7c62a164
DV
1406 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1407 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1408 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1409 struct drm_i915_private *dev_priv = dev->dev_private;
1410 u32 dpa_ctl;
1411
6e3c9717
ACO
1412 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1413 crtc->config->port_clock);
ea9b6006
DV
1414 dpa_ctl = I915_READ(DP_A);
1415 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1416
6e3c9717 1417 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1418 /* For a long time we've carried around an ILK-DevA w/a for the
1419 * 160MHz clock. If we're really unlucky, it's still required.
1420 */
1421 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1422 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1423 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1424 } else {
1425 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1426 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1427 }
1ce17038 1428
ea9b6006
DV
1429 I915_WRITE(DP_A, dpa_ctl);
1430
1431 POSTING_READ(DP_A);
1432 udelay(500);
1433}
1434
8ac33ed3 1435static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1436{
b934223d 1437 struct drm_device *dev = encoder->base.dev;
417e822d 1438 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1439 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1440 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1441 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1442 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1443
417e822d 1444 /*
1a2eb460 1445 * There are four kinds of DP registers:
417e822d
KP
1446 *
1447 * IBX PCH
1a2eb460
KP
1448 * SNB CPU
1449 * IVB CPU
417e822d
KP
1450 * CPT PCH
1451 *
1452 * IBX PCH and CPU are the same for almost everything,
1453 * except that the CPU DP PLL is configured in this
1454 * register
1455 *
1456 * CPT PCH is quite different, having many bits moved
1457 * to the TRANS_DP_CTL register instead. That
1458 * configuration happens (oddly) in ironlake_pch_enable
1459 */
9c9e7927 1460
417e822d
KP
1461 /* Preserve the BIOS-computed detected bit. This is
1462 * supposed to be read-only.
1463 */
1464 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1465
417e822d 1466 /* Handle DP bits in common between all three register formats */
417e822d 1467 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1468 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1469
6e3c9717 1470 if (crtc->config->has_audio)
ea5b213a 1471 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1472
417e822d 1473 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1474
bc7d38a4 1475 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1476 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1477 intel_dp->DP |= DP_SYNC_HS_HIGH;
1478 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1479 intel_dp->DP |= DP_SYNC_VS_HIGH;
1480 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1481
6aba5b6c 1482 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1483 intel_dp->DP |= DP_ENHANCED_FRAMING;
1484
7c62a164 1485 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1486 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1487 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1488 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1489
1490 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1491 intel_dp->DP |= DP_SYNC_HS_HIGH;
1492 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1493 intel_dp->DP |= DP_SYNC_VS_HIGH;
1494 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1495
6aba5b6c 1496 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1497 intel_dp->DP |= DP_ENHANCED_FRAMING;
1498
44f37d1f
CML
1499 if (!IS_CHERRYVIEW(dev)) {
1500 if (crtc->pipe == 1)
1501 intel_dp->DP |= DP_PIPEB_SELECT;
1502 } else {
1503 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1504 }
417e822d
KP
1505 } else {
1506 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1507 }
a4fc5ed6
KP
1508}
1509
ffd6749d
PZ
1510#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1511#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1512
1a5ef5b7
PZ
1513#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1514#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1515
ffd6749d
PZ
1516#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1517#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1518
4be73780 1519static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1520 u32 mask,
1521 u32 value)
bd943159 1522{
30add22d 1523 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1524 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1525 u32 pp_stat_reg, pp_ctrl_reg;
1526
e39b999a
VS
1527 lockdep_assert_held(&dev_priv->pps_mutex);
1528
bf13e81b
JN
1529 pp_stat_reg = _pp_stat_reg(intel_dp);
1530 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1531
99ea7127 1532 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1533 mask, value,
1534 I915_READ(pp_stat_reg),
1535 I915_READ(pp_ctrl_reg));
32ce697c 1536
453c5420 1537 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1538 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1539 I915_READ(pp_stat_reg),
1540 I915_READ(pp_ctrl_reg));
32ce697c 1541 }
54c136d4
CW
1542
1543 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1544}
32ce697c 1545
4be73780 1546static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1547{
1548 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1549 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1550}
1551
4be73780 1552static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1553{
1554 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1555 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1556}
1557
4be73780 1558static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1559{
1560 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1561
1562 /* When we disable the VDD override bit last we have to do the manual
1563 * wait. */
1564 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1565 intel_dp->panel_power_cycle_delay);
1566
4be73780 1567 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1568}
1569
4be73780 1570static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1571{
1572 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1573 intel_dp->backlight_on_delay);
1574}
1575
4be73780 1576static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1577{
1578 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1579 intel_dp->backlight_off_delay);
1580}
99ea7127 1581
832dd3c1
KP
1582/* Read the current pp_control value, unlocking the register if it
1583 * is locked
1584 */
1585
453c5420 1586static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1587{
453c5420
JB
1588 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1589 struct drm_i915_private *dev_priv = dev->dev_private;
1590 u32 control;
832dd3c1 1591
e39b999a
VS
1592 lockdep_assert_held(&dev_priv->pps_mutex);
1593
bf13e81b 1594 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1595 control &= ~PANEL_UNLOCK_MASK;
1596 control |= PANEL_UNLOCK_REGS;
1597 return control;
bd943159
KP
1598}
1599
951468f3
VS
1600/*
1601 * Must be paired with edp_panel_vdd_off().
1602 * Must hold pps_mutex around the whole on/off sequence.
1603 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1604 */
1e0560e0 1605static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1606{
30add22d 1607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1608 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1609 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1610 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1611 enum intel_display_power_domain power_domain;
5d613501 1612 u32 pp;
453c5420 1613 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1614 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1615
e39b999a
VS
1616 lockdep_assert_held(&dev_priv->pps_mutex);
1617
97af61f5 1618 if (!is_edp(intel_dp))
adddaaf4 1619 return false;
bd943159 1620
2c623c11 1621 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1622 intel_dp->want_panel_vdd = true;
99ea7127 1623
4be73780 1624 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1625 return need_to_disable;
b0665d57 1626
4e6e1a54
ID
1627 power_domain = intel_display_port_power_domain(intel_encoder);
1628 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1629
3936fcf4
VS
1630 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1631 port_name(intel_dig_port->port));
bd943159 1632
4be73780
DV
1633 if (!edp_have_panel_power(intel_dp))
1634 wait_panel_power_cycle(intel_dp);
99ea7127 1635
453c5420 1636 pp = ironlake_get_pp_control(intel_dp);
5d613501 1637 pp |= EDP_FORCE_VDD;
ebf33b18 1638
bf13e81b
JN
1639 pp_stat_reg = _pp_stat_reg(intel_dp);
1640 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1641
1642 I915_WRITE(pp_ctrl_reg, pp);
1643 POSTING_READ(pp_ctrl_reg);
1644 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1645 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1646 /*
1647 * If the panel wasn't on, delay before accessing aux channel
1648 */
4be73780 1649 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1650 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1651 port_name(intel_dig_port->port));
f01eca2e 1652 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1653 }
adddaaf4
JN
1654
1655 return need_to_disable;
1656}
1657
951468f3
VS
1658/*
1659 * Must be paired with intel_edp_panel_vdd_off() or
1660 * intel_edp_panel_off().
1661 * Nested calls to these functions are not allowed since
1662 * we drop the lock. Caller must use some higher level
1663 * locking to prevent nested calls from other threads.
1664 */
b80d6c78 1665void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1666{
c695b6b6 1667 bool vdd;
adddaaf4 1668
c695b6b6
VS
1669 if (!is_edp(intel_dp))
1670 return;
1671
773538e8 1672 pps_lock(intel_dp);
c695b6b6 1673 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1674 pps_unlock(intel_dp);
c695b6b6 1675
e2c719b7 1676 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1677 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1678}
1679
4be73780 1680static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1681{
30add22d 1682 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1683 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1684 struct intel_digital_port *intel_dig_port =
1685 dp_to_dig_port(intel_dp);
1686 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1687 enum intel_display_power_domain power_domain;
5d613501 1688 u32 pp;
453c5420 1689 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1690
e39b999a 1691 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1692
15e899a0 1693 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1694
15e899a0 1695 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1696 return;
b0665d57 1697
3936fcf4
VS
1698 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1699 port_name(intel_dig_port->port));
bd943159 1700
be2c9196
VS
1701 pp = ironlake_get_pp_control(intel_dp);
1702 pp &= ~EDP_FORCE_VDD;
453c5420 1703
be2c9196
VS
1704 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1705 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1706
be2c9196
VS
1707 I915_WRITE(pp_ctrl_reg, pp);
1708 POSTING_READ(pp_ctrl_reg);
90791a5c 1709
be2c9196
VS
1710 /* Make sure sequencer is idle before allowing subsequent activity */
1711 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1712 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1713
be2c9196
VS
1714 if ((pp & POWER_TARGET_ON) == 0)
1715 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1716
be2c9196
VS
1717 power_domain = intel_display_port_power_domain(intel_encoder);
1718 intel_display_power_put(dev_priv, power_domain);
bd943159 1719}
5d613501 1720
4be73780 1721static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1722{
1723 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1724 struct intel_dp, panel_vdd_work);
bd943159 1725
773538e8 1726 pps_lock(intel_dp);
15e899a0
VS
1727 if (!intel_dp->want_panel_vdd)
1728 edp_panel_vdd_off_sync(intel_dp);
773538e8 1729 pps_unlock(intel_dp);
bd943159
KP
1730}
1731
aba86890
ID
1732static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1733{
1734 unsigned long delay;
1735
1736 /*
1737 * Queue the timer to fire a long time from now (relative to the power
1738 * down delay) to keep the panel power up across a sequence of
1739 * operations.
1740 */
1741 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1742 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1743}
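/*
 * Worked example (added; the delay value is hypothetical): with
 * intel_dp->panel_power_cycle_delay == 500 (ms), the helper above queues
 * panel_vdd_work msecs_to_jiffies(500 * 5) == 2500 ms into the future, so
 * a burst of back-to-back AUX transactions keeps reusing the already
 * forced VDD instead of power cycling the panel every time.
 */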
1744
951468f3
VS
1745/*
1746 * Must be paired with edp_panel_vdd_on().
1747 * Must hold pps_mutex around the whole on/off sequence.
1748 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1749 */
4be73780 1750static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1751{
e39b999a
VS
1752 struct drm_i915_private *dev_priv =
1753 intel_dp_to_dev(intel_dp)->dev_private;
1754
1755 lockdep_assert_held(&dev_priv->pps_mutex);
1756
97af61f5
KP
1757 if (!is_edp(intel_dp))
1758 return;
5d613501 1759
e2c719b7 1760 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1761 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1762
bd943159
KP
1763 intel_dp->want_panel_vdd = false;
1764
aba86890 1765 if (sync)
4be73780 1766 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1767 else
1768 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1769}
1770
9f0fb5be 1771static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1772{
30add22d 1773 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1774 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1775 u32 pp;
453c5420 1776 u32 pp_ctrl_reg;
9934c132 1777
9f0fb5be
VS
1778 lockdep_assert_held(&dev_priv->pps_mutex);
1779
97af61f5 1780 if (!is_edp(intel_dp))
bd943159 1781 return;
99ea7127 1782
3936fcf4
VS
1783 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1784 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1785
e7a89ace
VS
1786 if (WARN(edp_have_panel_power(intel_dp),
1787 "eDP port %c panel power already on\n",
1788 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1789 return;
9934c132 1790
4be73780 1791 wait_panel_power_cycle(intel_dp);
37c6c9b0 1792
bf13e81b 1793 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1794 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1795 if (IS_GEN5(dev)) {
1796 /* ILK workaround: disable reset around power sequence */
1797 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1798 I915_WRITE(pp_ctrl_reg, pp);
1799 POSTING_READ(pp_ctrl_reg);
05ce1a49 1800 }
37c6c9b0 1801
1c0ae80a 1802 pp |= POWER_TARGET_ON;
99ea7127
KP
1803 if (!IS_GEN5(dev))
1804 pp |= PANEL_POWER_RESET;
1805
453c5420
JB
1806 I915_WRITE(pp_ctrl_reg, pp);
1807 POSTING_READ(pp_ctrl_reg);
9934c132 1808
4be73780 1809 wait_panel_on(intel_dp);
dce56b3c 1810 intel_dp->last_power_on = jiffies;
9934c132 1811
05ce1a49
KP
1812 if (IS_GEN5(dev)) {
1813 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1814 I915_WRITE(pp_ctrl_reg, pp);
1815 POSTING_READ(pp_ctrl_reg);
05ce1a49 1816 }
9f0fb5be 1817}
e39b999a 1818
9f0fb5be
VS
1819void intel_edp_panel_on(struct intel_dp *intel_dp)
1820{
1821 if (!is_edp(intel_dp))
1822 return;
1823
1824 pps_lock(intel_dp);
1825 edp_panel_on(intel_dp);
773538e8 1826 pps_unlock(intel_dp);
9934c132
JB
1827}
1828
9f0fb5be
VS
1829
1830static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1831{
4e6e1a54
ID
1832 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1833 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1834 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1835 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1836 enum intel_display_power_domain power_domain;
99ea7127 1837 u32 pp;
453c5420 1838 u32 pp_ctrl_reg;
9934c132 1839
9f0fb5be
VS
1840 lockdep_assert_held(&dev_priv->pps_mutex);
1841
97af61f5
KP
1842 if (!is_edp(intel_dp))
1843 return;
37c6c9b0 1844
3936fcf4
VS
1845 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1846 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1847
3936fcf4
VS
1848 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1849 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1850
453c5420 1851 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1852 /* We need to switch off panel power _and_ force vdd, for otherwise some
1853 * panels get very unhappy and cease to work. */
b3064154
PJ
1854 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1855 EDP_BLC_ENABLE);
453c5420 1856
bf13e81b 1857 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1858
849e39f5
PZ
1859 intel_dp->want_panel_vdd = false;
1860
453c5420
JB
1861 I915_WRITE(pp_ctrl_reg, pp);
1862 POSTING_READ(pp_ctrl_reg);
9934c132 1863
dce56b3c 1864 intel_dp->last_power_cycle = jiffies;
4be73780 1865 wait_panel_off(intel_dp);
849e39f5
PZ
1866
1867 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1868 power_domain = intel_display_port_power_domain(intel_encoder);
1869 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1870}
e39b999a 1871
9f0fb5be
VS
1872void intel_edp_panel_off(struct intel_dp *intel_dp)
1873{
1874 if (!is_edp(intel_dp))
1875 return;
e39b999a 1876
9f0fb5be
VS
1877 pps_lock(intel_dp);
1878 edp_panel_off(intel_dp);
773538e8 1879 pps_unlock(intel_dp);
9934c132
JB
1880}
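/*
 * Usage sketch (added for illustration, not part of the original file):
 * how the static panel power helpers above are meant to be combined under
 * pps_mutex. The pattern mirrors intel_enable_dp() later in this file;
 * the wrapper name example_edp_power_up() is hypothetical.
 */
static void example_edp_power_up(struct intel_dp *intel_dp)
{
	pps_lock(intel_dp);

	/* Force VDD first, bring full panel power up, then drop the force. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);
}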
1881
1250d107
JN
1882/* Enable backlight in the panel power control. */
1883static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1884{
da63a9f2
PZ
1885 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1886 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1887 struct drm_i915_private *dev_priv = dev->dev_private;
1888 u32 pp;
453c5420 1889 u32 pp_ctrl_reg;
32f9d658 1890
01cb9ea6
JB
1891 /*
1892 * If we enable the backlight right away following a panel power
1893 * on, we may see slight flicker as the panel syncs with the eDP
1894 * link. So delay a bit to make sure the image is solid before
1895 * allowing it to appear.
1896 */
4be73780 1897 wait_backlight_on(intel_dp);
e39b999a 1898
773538e8 1899 pps_lock(intel_dp);
e39b999a 1900
453c5420 1901 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1902 pp |= EDP_BLC_ENABLE;
453c5420 1903
bf13e81b 1904 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1905
1906 I915_WRITE(pp_ctrl_reg, pp);
1907 POSTING_READ(pp_ctrl_reg);
e39b999a 1908
773538e8 1909 pps_unlock(intel_dp);
32f9d658
ZW
1910}
1911
1250d107
JN
1912/* Enable backlight PWM and backlight PP control. */
1913void intel_edp_backlight_on(struct intel_dp *intel_dp)
1914{
1915 if (!is_edp(intel_dp))
1916 return;
1917
1918 DRM_DEBUG_KMS("\n");
1919
1920 intel_panel_enable_backlight(intel_dp->attached_connector);
1921 _intel_edp_backlight_on(intel_dp);
1922}
1923
1924/* Disable backlight in the panel power control. */
1925static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1926{
30add22d 1927 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1928 struct drm_i915_private *dev_priv = dev->dev_private;
1929 u32 pp;
453c5420 1930 u32 pp_ctrl_reg;
32f9d658 1931
f01eca2e
KP
1932 if (!is_edp(intel_dp))
1933 return;
1934
773538e8 1935 pps_lock(intel_dp);
e39b999a 1936
453c5420 1937 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1938 pp &= ~EDP_BLC_ENABLE;
453c5420 1939
bf13e81b 1940 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1941
1942 I915_WRITE(pp_ctrl_reg, pp);
1943 POSTING_READ(pp_ctrl_reg);
f7d2323c 1944
773538e8 1945 pps_unlock(intel_dp);
e39b999a
VS
1946
1947 intel_dp->last_backlight_off = jiffies;
f7d2323c 1948 edp_wait_backlight_off(intel_dp);
1250d107 1949}
f7d2323c 1950
1250d107
JN
1951/* Disable backlight PP control and backlight PWM. */
1952void intel_edp_backlight_off(struct intel_dp *intel_dp)
1953{
1954 if (!is_edp(intel_dp))
1955 return;
1956
1957 DRM_DEBUG_KMS("\n");
f7d2323c 1958
1250d107 1959 _intel_edp_backlight_off(intel_dp);
f7d2323c 1960 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1961}
a4fc5ed6 1962
73580fb7
JN
1963/*
1964 * Hook for controlling the panel power control backlight through the bl_power
1965 * sysfs attribute. Take care to handle multiple calls.
1966 */
1967static void intel_edp_backlight_power(struct intel_connector *connector,
1968 bool enable)
1969{
1970 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1971 bool is_enabled;
1972
773538e8 1973 pps_lock(intel_dp);
e39b999a 1974 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1975 pps_unlock(intel_dp);
73580fb7
JN
1976
1977 if (is_enabled == enable)
1978 return;
1979
23ba9373
JN
1980 DRM_DEBUG_KMS("panel power control backlight %s\n",
1981 enable ? "enable" : "disable");
73580fb7
JN
1982
1983 if (enable)
1984 _intel_edp_backlight_on(intel_dp);
1985 else
1986 _intel_edp_backlight_off(intel_dp);
1987}
1988
2bd2ad64 1989static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 1990{
da63a9f2
PZ
1991 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1992 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1993 struct drm_device *dev = crtc->dev;
d240f20f
JB
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995 u32 dpa_ctl;
1996
2bd2ad64
DV
1997 assert_pipe_disabled(dev_priv,
1998 to_intel_crtc(crtc)->pipe);
1999
d240f20f
JB
2000 DRM_DEBUG_KMS("\n");
2001 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2002 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2003 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2004
2005 /* We don't adjust intel_dp->DP while tearing down the link, to
2006 * facilitate link retraining (e.g. after hotplug). Hence clear all
2007 * enable bits here to ensure that we don't enable too much. */
2008 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2009 intel_dp->DP |= DP_PLL_ENABLE;
2010 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2011 POSTING_READ(DP_A);
2012 udelay(200);
d240f20f
JB
2013}
2014
2bd2ad64 2015static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2016{
da63a9f2
PZ
2017 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2018 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2019 struct drm_device *dev = crtc->dev;
d240f20f
JB
2020 struct drm_i915_private *dev_priv = dev->dev_private;
2021 u32 dpa_ctl;
2022
2bd2ad64
DV
2023 assert_pipe_disabled(dev_priv,
2024 to_intel_crtc(crtc)->pipe);
2025
d240f20f 2026 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2027 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2028 "dp pll off, should be on\n");
2029 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2030
2031 /* We can't rely on the value tracked for the DP register in
2032 * intel_dp->DP because link_down must not change that (otherwise link
 2033 * re-training will fail). */
298b0b39 2034 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2035 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2036 POSTING_READ(DP_A);
d240f20f
JB
2037 udelay(200);
2038}
2039
c7ad3810 2040/* If the sink supports it, try to set the power state appropriately */
c19b0669 2041void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2042{
2043 int ret, i;
2044
2045 /* Should have a valid DPCD by this point */
2046 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2047 return;
2048
2049 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2050 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2051 DP_SET_POWER_D3);
c7ad3810
JB
2052 } else {
2053 /*
2054 * When turning on, we need to retry for 1ms to give the sink
2055 * time to wake up.
2056 */
2057 for (i = 0; i < 3; i++) {
9d1a1031
JN
2058 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2059 DP_SET_POWER_D0);
c7ad3810
JB
2060 if (ret == 1)
2061 break;
2062 msleep(1);
2063 }
2064 }
f9cac721
JN
2065
2066 if (ret != 1)
2067 DRM_DEBUG_KMS("failed to %s sink power state\n",
2068 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2069}
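/*
 * Usage sketch (added for illustration, not part of the original file):
 * callers bring the sink to D0 before (re)training the link and put it in
 * D3 before tearing the link down, as intel_enable_dp() and
 * intel_disable_dp() in this file do. The wrapper name
 * example_retrain_link() is hypothetical.
 */
static void example_retrain_link(struct intel_dp *intel_dp)
{
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);

	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);
}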
2070
19d8fe15
DV
2071static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2072 enum pipe *pipe)
d240f20f 2073{
19d8fe15 2074 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2075 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2076 struct drm_device *dev = encoder->base.dev;
2077 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2078 enum intel_display_power_domain power_domain;
2079 u32 tmp;
2080
2081 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2082 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2083 return false;
2084
2085 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2086
2087 if (!(tmp & DP_PORT_EN))
2088 return false;
2089
bc7d38a4 2090 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2091 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2092 } else if (IS_CHERRYVIEW(dev)) {
2093 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2094 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2095 *pipe = PORT_TO_PIPE(tmp);
2096 } else {
2097 u32 trans_sel;
2098 u32 trans_dp;
2099 int i;
2100
2101 switch (intel_dp->output_reg) {
2102 case PCH_DP_B:
2103 trans_sel = TRANS_DP_PORT_SEL_B;
2104 break;
2105 case PCH_DP_C:
2106 trans_sel = TRANS_DP_PORT_SEL_C;
2107 break;
2108 case PCH_DP_D:
2109 trans_sel = TRANS_DP_PORT_SEL_D;
2110 break;
2111 default:
2112 return true;
2113 }
2114
055e393f 2115 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2116 trans_dp = I915_READ(TRANS_DP_CTL(i));
2117 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2118 *pipe = i;
2119 return true;
2120 }
2121 }
19d8fe15 2122
4a0833ec
DV
2123 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2124 intel_dp->output_reg);
2125 }
d240f20f 2126
19d8fe15
DV
2127 return true;
2128}
d240f20f 2129
045ac3b5 2130static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2131 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2132{
2133 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2134 u32 tmp, flags = 0;
63000ef6
XZ
2135 struct drm_device *dev = encoder->base.dev;
2136 struct drm_i915_private *dev_priv = dev->dev_private;
2137 enum port port = dp_to_dig_port(intel_dp)->port;
2138 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2139 int dotclock;
045ac3b5 2140
9ed109a7
DV
2141 tmp = I915_READ(intel_dp->output_reg);
2142 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2143 pipe_config->has_audio = true;
2144
63000ef6 2145 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2146 if (tmp & DP_SYNC_HS_HIGH)
2147 flags |= DRM_MODE_FLAG_PHSYNC;
2148 else
2149 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2150
63000ef6
XZ
2151 if (tmp & DP_SYNC_VS_HIGH)
2152 flags |= DRM_MODE_FLAG_PVSYNC;
2153 else
2154 flags |= DRM_MODE_FLAG_NVSYNC;
2155 } else {
2156 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2157 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2158 flags |= DRM_MODE_FLAG_PHSYNC;
2159 else
2160 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2161
63000ef6
XZ
2162 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2163 flags |= DRM_MODE_FLAG_PVSYNC;
2164 else
2165 flags |= DRM_MODE_FLAG_NVSYNC;
2166 }
045ac3b5 2167
2d112de7 2168 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2169
8c875fca
VS
2170 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2171 tmp & DP_COLOR_RANGE_16_235)
2172 pipe_config->limited_color_range = true;
2173
eb14cb74
VS
2174 pipe_config->has_dp_encoder = true;
2175
2176 intel_dp_get_m_n(crtc, pipe_config);
2177
18442d08 2178 if (port == PORT_A) {
f1f644dc
JB
2179 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2180 pipe_config->port_clock = 162000;
2181 else
2182 pipe_config->port_clock = 270000;
2183 }
18442d08
VS
2184
2185 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2186 &pipe_config->dp_m_n);
2187
2188 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2189 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2190
2d112de7 2191 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2192
c6cd2ee2
JN
2193 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2194 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2195 /*
2196 * This is a big fat ugly hack.
2197 *
2198 * Some machines in UEFI boot mode provide us a VBT that has 18
2199 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2200 * unknown we fail to light up. Yet the same BIOS boots up with
2201 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2202 * max, not what it tells us to use.
2203 *
2204 * Note: This will still be broken if the eDP panel is not lit
2205 * up by the BIOS, and thus we can't get the mode at module
2206 * load.
2207 */
2208 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2209 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2210 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2211 }
045ac3b5
JB
2212}
2213
e8cb4558 2214static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2215{
e8cb4558 2216 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2217 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2218 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2219
6e3c9717 2220 if (crtc->config->has_audio)
495a5bb8 2221 intel_audio_codec_disable(encoder);
6cb49835 2222
b32c6f48
RV
2223 if (HAS_PSR(dev) && !HAS_DDI(dev))
2224 intel_psr_disable(intel_dp);
2225
6cb49835
DV
2226 /* Make sure the panel is off before trying to change the mode. But also
2227 * ensure that we have vdd while we switch off the panel. */
24f3e092 2228 intel_edp_panel_vdd_on(intel_dp);
4be73780 2229 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2230 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2231 intel_edp_panel_off(intel_dp);
3739850b 2232
08aff3fe
VS
2233 /* disable the port before the pipe on g4x */
2234 if (INTEL_INFO(dev)->gen < 5)
3739850b 2235 intel_dp_link_down(intel_dp);
d240f20f
JB
2236}
2237
08aff3fe 2238static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2239{
2bd2ad64 2240 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2241 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2242
49277c31 2243 intel_dp_link_down(intel_dp);
08aff3fe
VS
2244 if (port == PORT_A)
2245 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2246}
2247
2248static void vlv_post_disable_dp(struct intel_encoder *encoder)
2249{
2250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2251
2252 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2253}
2254
580d3811
VS
2255static void chv_post_disable_dp(struct intel_encoder *encoder)
2256{
2257 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2258 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2259 struct drm_device *dev = encoder->base.dev;
2260 struct drm_i915_private *dev_priv = dev->dev_private;
2261 struct intel_crtc *intel_crtc =
2262 to_intel_crtc(encoder->base.crtc);
2263 enum dpio_channel ch = vlv_dport_to_channel(dport);
2264 enum pipe pipe = intel_crtc->pipe;
2265 u32 val;
2266
2267 intel_dp_link_down(intel_dp);
2268
2269 mutex_lock(&dev_priv->dpio_lock);
2270
2271 /* Propagate soft reset to data lane reset */
97fd4d5c 2272 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2273 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2274 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2275
97fd4d5c
VS
2276 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2277 val |= CHV_PCS_REQ_SOFTRESET_EN;
2278 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2279
2280 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2281 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2282 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2283
2284 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2285 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2286 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2287
2288 mutex_unlock(&dev_priv->dpio_lock);
2289}
2290
7b13b58a
VS
2291static void
2292_intel_dp_set_link_train(struct intel_dp *intel_dp,
2293 uint32_t *DP,
2294 uint8_t dp_train_pat)
2295{
2296 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2297 struct drm_device *dev = intel_dig_port->base.base.dev;
2298 struct drm_i915_private *dev_priv = dev->dev_private;
2299 enum port port = intel_dig_port->port;
2300
2301 if (HAS_DDI(dev)) {
2302 uint32_t temp = I915_READ(DP_TP_CTL(port));
2303
2304 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2305 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2306 else
2307 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2308
2309 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2310 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2311 case DP_TRAINING_PATTERN_DISABLE:
2312 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2313
2314 break;
2315 case DP_TRAINING_PATTERN_1:
2316 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2317 break;
2318 case DP_TRAINING_PATTERN_2:
2319 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2320 break;
2321 case DP_TRAINING_PATTERN_3:
2322 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2323 break;
2324 }
2325 I915_WRITE(DP_TP_CTL(port), temp);
2326
2327 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2328 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2329
2330 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2331 case DP_TRAINING_PATTERN_DISABLE:
2332 *DP |= DP_LINK_TRAIN_OFF_CPT;
2333 break;
2334 case DP_TRAINING_PATTERN_1:
2335 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2336 break;
2337 case DP_TRAINING_PATTERN_2:
2338 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2339 break;
2340 case DP_TRAINING_PATTERN_3:
2341 DRM_ERROR("DP training pattern 3 not supported\n");
2342 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2343 break;
2344 }
2345
2346 } else {
2347 if (IS_CHERRYVIEW(dev))
2348 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2349 else
2350 *DP &= ~DP_LINK_TRAIN_MASK;
2351
2352 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2353 case DP_TRAINING_PATTERN_DISABLE:
2354 *DP |= DP_LINK_TRAIN_OFF;
2355 break;
2356 case DP_TRAINING_PATTERN_1:
2357 *DP |= DP_LINK_TRAIN_PAT_1;
2358 break;
2359 case DP_TRAINING_PATTERN_2:
2360 *DP |= DP_LINK_TRAIN_PAT_2;
2361 break;
2362 case DP_TRAINING_PATTERN_3:
2363 if (IS_CHERRYVIEW(dev)) {
2364 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2365 } else {
2366 DRM_ERROR("DP training pattern 3 not supported\n");
2367 *DP |= DP_LINK_TRAIN_PAT_2;
2368 }
2369 break;
2370 }
2371 }
2372}
2373
2374static void intel_dp_enable_port(struct intel_dp *intel_dp)
2375{
2376 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2377 struct drm_i915_private *dev_priv = dev->dev_private;
2378
7b13b58a
VS
2379 /* enable with pattern 1 (as per spec) */
2380 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2381 DP_TRAINING_PATTERN_1);
2382
2383 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2384 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2385
2386 /*
2387 * Magic for VLV/CHV. We _must_ first set up the register
2388 * without actually enabling the port, and then do another
2389 * write to enable the port. Otherwise link training will
2390 * fail when the power sequencer is freshly used for this port.
2391 */
2392 intel_dp->DP |= DP_PORT_EN;
2393
2394 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2395 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2396}
2397
e8cb4558 2398static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2399{
e8cb4558
DV
2400 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2401 struct drm_device *dev = encoder->base.dev;
2402 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2403 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2404 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2405
0c33d8d7
DV
2406 if (WARN_ON(dp_reg & DP_PORT_EN))
2407 return;
5d613501 2408
093e3f13
VS
2409 pps_lock(intel_dp);
2410
2411 if (IS_VALLEYVIEW(dev))
2412 vlv_init_panel_power_sequencer(intel_dp);
2413
7b13b58a 2414 intel_dp_enable_port(intel_dp);
093e3f13
VS
2415
2416 edp_panel_vdd_on(intel_dp);
2417 edp_panel_on(intel_dp);
2418 edp_panel_vdd_off(intel_dp, true);
2419
2420 pps_unlock(intel_dp);
2421
61234fa5
VS
2422 if (IS_VALLEYVIEW(dev))
2423 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2424
f01eca2e 2425 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2426 intel_dp_start_link_train(intel_dp);
33a34e4e 2427 intel_dp_complete_link_train(intel_dp);
3ab9c637 2428 intel_dp_stop_link_train(intel_dp);
c1dec79a 2429
6e3c9717 2430 if (crtc->config->has_audio) {
c1dec79a
JN
2431 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2432 pipe_name(crtc->pipe));
2433 intel_audio_codec_enable(encoder);
2434 }
ab1f90f9 2435}
89b667f8 2436
ecff4f3b
JN
2437static void g4x_enable_dp(struct intel_encoder *encoder)
2438{
828f5c6e
JN
2439 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2440
ecff4f3b 2441 intel_enable_dp(encoder);
4be73780 2442 intel_edp_backlight_on(intel_dp);
ab1f90f9 2443}
89b667f8 2444
ab1f90f9
JN
2445static void vlv_enable_dp(struct intel_encoder *encoder)
2446{
828f5c6e
JN
2447 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2448
4be73780 2449 intel_edp_backlight_on(intel_dp);
b32c6f48 2450 intel_psr_enable(intel_dp);
d240f20f
JB
2451}
2452
ecff4f3b 2453static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2454{
2455 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2456 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2457
8ac33ed3
DV
2458 intel_dp_prepare(encoder);
2459
d41f1efb
DV
2460 /* Only ilk+ has port A */
2461 if (dport->port == PORT_A) {
2462 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2463 ironlake_edp_pll_on(intel_dp);
d41f1efb 2464 }
ab1f90f9
JN
2465}
2466
83b84597
VS
2467static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2468{
2469 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2470 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2471 enum pipe pipe = intel_dp->pps_pipe;
2472 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2473
2474 edp_panel_vdd_off_sync(intel_dp);
2475
2476 /*
 2477 * VLV seems to get confused when multiple power sequencers
 2478 * have the same port selected (even if only one has power/vdd
 2479 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2480 * CHV, on the other hand, doesn't seem to mind having the same port
 2481 * selected in multiple power sequencers, but let's clear the
2482 * port select always when logically disconnecting a power sequencer
2483 * from a port.
2484 */
2485 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2486 pipe_name(pipe), port_name(intel_dig_port->port));
2487 I915_WRITE(pp_on_reg, 0);
2488 POSTING_READ(pp_on_reg);
2489
2490 intel_dp->pps_pipe = INVALID_PIPE;
2491}
2492
a4a5d2f8
VS
2493static void vlv_steal_power_sequencer(struct drm_device *dev,
2494 enum pipe pipe)
2495{
2496 struct drm_i915_private *dev_priv = dev->dev_private;
2497 struct intel_encoder *encoder;
2498
2499 lockdep_assert_held(&dev_priv->pps_mutex);
2500
ac3c12e4
VS
2501 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2502 return;
2503
a4a5d2f8
VS
2504 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2505 base.head) {
2506 struct intel_dp *intel_dp;
773538e8 2507 enum port port;
a4a5d2f8
VS
2508
2509 if (encoder->type != INTEL_OUTPUT_EDP)
2510 continue;
2511
2512 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2513 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2514
2515 if (intel_dp->pps_pipe != pipe)
2516 continue;
2517
2518 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2519 pipe_name(pipe), port_name(port));
a4a5d2f8 2520
034e43c6
VS
2521 WARN(encoder->connectors_active,
2522 "stealing pipe %c power sequencer from active eDP port %c\n",
2523 pipe_name(pipe), port_name(port));
a4a5d2f8 2524
a4a5d2f8 2525 /* make sure vdd is off before we steal it */
83b84597 2526 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2527 }
2528}
2529
2530static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2531{
2532 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2533 struct intel_encoder *encoder = &intel_dig_port->base;
2534 struct drm_device *dev = encoder->base.dev;
2535 struct drm_i915_private *dev_priv = dev->dev_private;
2536 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2537
2538 lockdep_assert_held(&dev_priv->pps_mutex);
2539
093e3f13
VS
2540 if (!is_edp(intel_dp))
2541 return;
2542
a4a5d2f8
VS
2543 if (intel_dp->pps_pipe == crtc->pipe)
2544 return;
2545
2546 /*
2547 * If another power sequencer was being used on this
2548 * port previously make sure to turn off vdd there while
2549 * we still have control of it.
2550 */
2551 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2552 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2553
2554 /*
2555 * We may be stealing the power
2556 * sequencer from another port.
2557 */
2558 vlv_steal_power_sequencer(dev, crtc->pipe);
2559
2560 /* now it's all ours */
2561 intel_dp->pps_pipe = crtc->pipe;
2562
2563 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2564 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2565
2566 /* init power sequencer on this pipe and port */
36b5f425
VS
2567 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2568 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2569}
2570
ab1f90f9 2571static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2572{
2bd2ad64 2573 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2574 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2575 struct drm_device *dev = encoder->base.dev;
89b667f8 2576 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2577 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2578 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2579 int pipe = intel_crtc->pipe;
2580 u32 val;
a4fc5ed6 2581
ab1f90f9 2582 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2583
ab3c759a 2584 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2585 val = 0;
2586 if (pipe)
2587 val |= (1<<21);
2588 else
2589 val &= ~(1<<21);
2590 val |= 0x001000c4;
ab3c759a
CML
2591 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2592 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2593 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2594
ab1f90f9
JN
2595 mutex_unlock(&dev_priv->dpio_lock);
2596
2597 intel_enable_dp(encoder);
89b667f8
JB
2598}
2599
ecff4f3b 2600static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2601{
2602 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2603 struct drm_device *dev = encoder->base.dev;
2604 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2605 struct intel_crtc *intel_crtc =
2606 to_intel_crtc(encoder->base.crtc);
e4607fcf 2607 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2608 int pipe = intel_crtc->pipe;
89b667f8 2609
8ac33ed3
DV
2610 intel_dp_prepare(encoder);
2611
89b667f8 2612 /* Program Tx lane resets to default */
0980a60f 2613 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2614 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2615 DPIO_PCS_TX_LANE2_RESET |
2616 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2617 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2618 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2619 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2620 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2621 DPIO_PCS_CLK_SOFT_RESET);
2622
2623 /* Fix up inter-pair skew failure */
ab3c759a
CML
2624 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2625 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2626 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2627 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2628}
2629
e4a1d846
CML
2630static void chv_pre_enable_dp(struct intel_encoder *encoder)
2631{
2632 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2633 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2634 struct drm_device *dev = encoder->base.dev;
2635 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2636 struct intel_crtc *intel_crtc =
2637 to_intel_crtc(encoder->base.crtc);
2638 enum dpio_channel ch = vlv_dport_to_channel(dport);
2639 int pipe = intel_crtc->pipe;
2640 int data, i;
949c1d43 2641 u32 val;
e4a1d846 2642
e4a1d846 2643 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2644
570e2a74
VS
2645 /* allow hardware to manage TX FIFO reset source */
2646 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2647 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2648 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2649
2650 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2651 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2652 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2653
949c1d43 2654 /* Deassert soft data lane reset */
97fd4d5c 2655 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2656 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2657 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2658
2659 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2660 val |= CHV_PCS_REQ_SOFTRESET_EN;
2661 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2662
2663 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2664 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2666
97fd4d5c 2667 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2668 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2669 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2670
 2671 /* Program Tx lane latency optimal setting */
e4a1d846
CML
2672 for (i = 0; i < 4; i++) {
2673 /* Set the latency optimal bit */
2674 data = (i == 1) ? 0x0 : 0x6;
2675 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2676 data << DPIO_FRC_LATENCY_SHFIT);
2677
2678 /* Set the upar bit */
2679 data = (i == 1) ? 0x0 : 0x1;
2680 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2681 data << DPIO_UPAR_SHIFT);
2682 }
2683
2684 /* Data lane stagger programming */
2685 /* FIXME: Fix up value only after power analysis */
2686
2687 mutex_unlock(&dev_priv->dpio_lock);
2688
e4a1d846 2689 intel_enable_dp(encoder);
e4a1d846
CML
2690}
2691
9197c88b
VS
2692static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2693{
2694 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2695 struct drm_device *dev = encoder->base.dev;
2696 struct drm_i915_private *dev_priv = dev->dev_private;
2697 struct intel_crtc *intel_crtc =
2698 to_intel_crtc(encoder->base.crtc);
2699 enum dpio_channel ch = vlv_dport_to_channel(dport);
2700 enum pipe pipe = intel_crtc->pipe;
2701 u32 val;
2702
625695f8
VS
2703 intel_dp_prepare(encoder);
2704
9197c88b
VS
2705 mutex_lock(&dev_priv->dpio_lock);
2706
b9e5ac3c
VS
2707 /* program left/right clock distribution */
2708 if (pipe != PIPE_B) {
2709 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2710 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2711 if (ch == DPIO_CH0)
2712 val |= CHV_BUFLEFTENA1_FORCE;
2713 if (ch == DPIO_CH1)
2714 val |= CHV_BUFRIGHTENA1_FORCE;
2715 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2716 } else {
2717 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2718 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2719 if (ch == DPIO_CH0)
2720 val |= CHV_BUFLEFTENA2_FORCE;
2721 if (ch == DPIO_CH1)
2722 val |= CHV_BUFRIGHTENA2_FORCE;
2723 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2724 }
2725
9197c88b
VS
2726 /* program clock channel usage */
2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2728 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2729 if (pipe != PIPE_B)
2730 val &= ~CHV_PCS_USEDCLKCHANNEL;
2731 else
2732 val |= CHV_PCS_USEDCLKCHANNEL;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2736 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2737 if (pipe != PIPE_B)
2738 val &= ~CHV_PCS_USEDCLKCHANNEL;
2739 else
2740 val |= CHV_PCS_USEDCLKCHANNEL;
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2742
2743 /*
 2744 * This is a bit weird since generally CL
2745 * matches the pipe, but here we need to
2746 * pick the CL based on the port.
2747 */
2748 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2749 if (pipe != PIPE_B)
2750 val &= ~CHV_CMN_USEDCLKCHANNEL;
2751 else
2752 val |= CHV_CMN_USEDCLKCHANNEL;
2753 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2754
2755 mutex_unlock(&dev_priv->dpio_lock);
2756}
2757
a4fc5ed6 2758/*
df0c237d
JB
2759 * Native read with retry for link status and receiver capability reads for
2760 * cases where the sink may still be asleep.
9d1a1031
JN
2761 *
2762 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2763 * supposed to retry 3 times per the spec.
a4fc5ed6 2764 */
9d1a1031
JN
2765static ssize_t
2766intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2767 void *buffer, size_t size)
a4fc5ed6 2768{
9d1a1031
JN
2769 ssize_t ret;
2770 int i;
61da5fab 2771
f6a19066
VS
2772 /*
 2773 * Sometimes we just get the same incorrect byte repeated
 2774 * over the entire buffer. Doing just one throw-away read
2775 * initially seems to "solve" it.
2776 */
2777 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2778
61da5fab 2779 for (i = 0; i < 3; i++) {
9d1a1031
JN
2780 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2781 if (ret == size)
2782 return ret;
61da5fab
JB
2783 msleep(1);
2784 }
a4fc5ed6 2785
9d1a1031 2786 return ret;
a4fc5ed6
KP
2787}
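/*
 * Usage sketch (added for illustration, not part of the original file):
 * the wake-retry helper above is what DPCD capability reads in this file
 * go through, e.g. fetching the receiver capability block starting at
 * DP_DPCD_REV. The wrapper name example_read_dpcd_caps() is hypothetical.
 */
static bool example_read_dpcd_caps(struct intel_dp *intel_dp)
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
				       intel_dp->dpcd,
				       sizeof(intel_dp->dpcd)) ==
		sizeof(intel_dp->dpcd);
}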
2788
2789/*
2790 * Fetch AUX CH registers 0x202 - 0x207 which contain
2791 * link status information
2792 */
2793static bool
93f62dad 2794intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2795{
9d1a1031
JN
2796 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2797 DP_LANE0_1_STATUS,
2798 link_status,
2799 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2800}
2801
1100244e 2802/* These are source-specific values. */
a4fc5ed6 2803static uint8_t
1a2eb460 2804intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2805{
30add22d 2806 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2807 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2808 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2809
7ad14a29
SJ
2810 if (INTEL_INFO(dev)->gen >= 9) {
2811 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2812 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2813 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2814 } else if (IS_VALLEYVIEW(dev))
bd60018a 2815 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2816 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2817 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2818 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2819 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2820 else
bd60018a 2821 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2822}
2823
2824static uint8_t
2825intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2826{
30add22d 2827 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2828 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2829
5a9d1f1a
DL
2830 if (INTEL_INFO(dev)->gen >= 9) {
2831 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2832 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2833 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2834 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2835 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2836 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2837 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2838 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2839 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2840 default:
2841 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2842 }
2843 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2844 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2846 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2847 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2848 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2850 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2851 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2852 default:
bd60018a 2853 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2854 }
e2fa6fba
P
2855 } else if (IS_VALLEYVIEW(dev)) {
2856 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2858 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2860 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2862 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2864 default:
bd60018a 2865 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2866 }
bc7d38a4 2867 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2868 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2870 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2872 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2873 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2874 default:
bd60018a 2875 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2876 }
2877 } else {
2878 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2879 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2880 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2882 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2884 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2886 default:
bd60018a 2887 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2888 }
a4fc5ed6
KP
2889 }
2890}
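/*
 * Worked example (added; VLV chosen for illustration): on VLV
 * intel_dp_voltage_max() above returns DP_TRAIN_VOLTAGE_SWING_LEVEL_3,
 * and for a requested swing of LEVEL_0 intel_dp_pre_emphasis_max() allows
 * up to DP_TRAIN_PRE_EMPH_LEVEL_3. A sink requesting more than either
 * limit is clamped to the maximum and the corresponding
 * DP_TRAIN_MAX_*_REACHED flag is set in intel_get_adjust_train() below.
 */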
2891
e2fa6fba
P
2892static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2893{
2894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2895 struct drm_i915_private *dev_priv = dev->dev_private;
2896 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2897 struct intel_crtc *intel_crtc =
2898 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2899 unsigned long demph_reg_value, preemph_reg_value,
2900 uniqtranscale_reg_value;
2901 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2902 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2903 int pipe = intel_crtc->pipe;
e2fa6fba
P
2904
2905 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2906 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2907 preemph_reg_value = 0x0004000;
2908 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2909 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2910 demph_reg_value = 0x2B405555;
2911 uniqtranscale_reg_value = 0x552AB83A;
2912 break;
bd60018a 2913 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2914 demph_reg_value = 0x2B404040;
2915 uniqtranscale_reg_value = 0x5548B83A;
2916 break;
bd60018a 2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2918 demph_reg_value = 0x2B245555;
2919 uniqtranscale_reg_value = 0x5560B83A;
2920 break;
bd60018a 2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2922 demph_reg_value = 0x2B405555;
2923 uniqtranscale_reg_value = 0x5598DA3A;
2924 break;
2925 default:
2926 return 0;
2927 }
2928 break;
bd60018a 2929 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2930 preemph_reg_value = 0x0002000;
2931 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2932 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2933 demph_reg_value = 0x2B404040;
2934 uniqtranscale_reg_value = 0x5552B83A;
2935 break;
bd60018a 2936 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2937 demph_reg_value = 0x2B404848;
2938 uniqtranscale_reg_value = 0x5580B83A;
2939 break;
bd60018a 2940 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2941 demph_reg_value = 0x2B404040;
2942 uniqtranscale_reg_value = 0x55ADDA3A;
2943 break;
2944 default:
2945 return 0;
2946 }
2947 break;
bd60018a 2948 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2949 preemph_reg_value = 0x0000000;
2950 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2952 demph_reg_value = 0x2B305555;
2953 uniqtranscale_reg_value = 0x5570B83A;
2954 break;
bd60018a 2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2956 demph_reg_value = 0x2B2B4040;
2957 uniqtranscale_reg_value = 0x55ADDA3A;
2958 break;
2959 default:
2960 return 0;
2961 }
2962 break;
bd60018a 2963 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2964 preemph_reg_value = 0x0006000;
2965 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2967 demph_reg_value = 0x1B405555;
2968 uniqtranscale_reg_value = 0x55ADDA3A;
2969 break;
2970 default:
2971 return 0;
2972 }
2973 break;
2974 default:
2975 return 0;
2976 }
2977
0980a60f 2978 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2979 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2980 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2981 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2982 uniqtranscale_reg_value);
ab3c759a
CML
2983 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2984 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2985 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2986 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 2987 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
2988
2989 return 0;
2990}
2991
e4a1d846
CML
2992static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2993{
2994 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2995 struct drm_i915_private *dev_priv = dev->dev_private;
2996 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2997 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 2998 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
2999 uint8_t train_set = intel_dp->train_set[0];
3000 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3001 enum pipe pipe = intel_crtc->pipe;
3002 int i;
e4a1d846
CML
3003
3004 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3005 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3006 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3008 deemph_reg_value = 128;
3009 margin_reg_value = 52;
3010 break;
bd60018a 3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3012 deemph_reg_value = 128;
3013 margin_reg_value = 77;
3014 break;
bd60018a 3015 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3016 deemph_reg_value = 128;
3017 margin_reg_value = 102;
3018 break;
bd60018a 3019 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3020 deemph_reg_value = 128;
3021 margin_reg_value = 154;
3022 /* FIXME extra to set for 1200 */
3023 break;
3024 default:
3025 return 0;
3026 }
3027 break;
bd60018a 3028 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3029 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3031 deemph_reg_value = 85;
3032 margin_reg_value = 78;
3033 break;
bd60018a 3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3035 deemph_reg_value = 85;
3036 margin_reg_value = 116;
3037 break;
bd60018a 3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3039 deemph_reg_value = 85;
3040 margin_reg_value = 154;
3041 break;
3042 default:
3043 return 0;
3044 }
3045 break;
bd60018a 3046 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3047 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3049 deemph_reg_value = 64;
3050 margin_reg_value = 104;
3051 break;
bd60018a 3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3053 deemph_reg_value = 64;
3054 margin_reg_value = 154;
3055 break;
3056 default:
3057 return 0;
3058 }
3059 break;
bd60018a 3060 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3061 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3063 deemph_reg_value = 43;
3064 margin_reg_value = 154;
3065 break;
3066 default:
3067 return 0;
3068 }
3069 break;
3070 default:
3071 return 0;
3072 }
3073
3074 mutex_lock(&dev_priv->dpio_lock);
3075
3076 /* Clear calc init */
1966e59e
VS
3077 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3078 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3079 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3080 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3081 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3082
3083 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3084 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3085 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3086 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3087 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3088
a02ef3c7
VS
3089 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3090 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3091 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3092 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3093
3094 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3095 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3096 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3097 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3098
e4a1d846 3099 /* Program swing deemph */
f72df8db
VS
3100 for (i = 0; i < 4; i++) {
3101 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3102 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3103 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3104 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3105 }
e4a1d846
CML
3106
3107 /* Program swing margin */
f72df8db
VS
3108 for (i = 0; i < 4; i++) {
3109 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3110 val &= ~DPIO_SWING_MARGIN000_MASK;
3111 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3112 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3113 }
e4a1d846
CML
3114
3115 /* Disable unique transition scale */
f72df8db
VS
3116 for (i = 0; i < 4; i++) {
3117 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3118 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3119 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3120 }
e4a1d846
CML
3121
3122 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3123 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3124 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3125 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3126
3127 /*
3128 * The document said it needs to set bit 27 for ch0 and bit 26
3129 * for ch1. Might be a typo in the doc.
3130 * For now, for this unique transition scale selection, set bit
3131 * 27 for ch0 and ch1.
3132 */
f72df8db
VS
3133 for (i = 0; i < 4; i++) {
3134 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3135 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3136 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3137 }
e4a1d846 3138
f72df8db
VS
3139 for (i = 0; i < 4; i++) {
3140 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3141 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3142 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3143 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3144 }
e4a1d846
CML
3145 }
3146
3147 /* Start swing calculation */
1966e59e
VS
3148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3149 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3150 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3151
3152 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3153 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3154 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3155
3156 /* LRC Bypass */
3157 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3158 val |= DPIO_LRC_BYPASS;
3159 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3160
3161 mutex_unlock(&dev_priv->dpio_lock);
3162
3163 return 0;
3164}
3165
a4fc5ed6 3166static void
0301b3ac
JN
3167intel_get_adjust_train(struct intel_dp *intel_dp,
3168 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3169{
3170 uint8_t v = 0;
3171 uint8_t p = 0;
3172 int lane;
1a2eb460
KP
3173 uint8_t voltage_max;
3174 uint8_t preemph_max;
a4fc5ed6 3175
33a34e4e 3176 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3177 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3178 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3179
3180 if (this_v > v)
3181 v = this_v;
3182 if (this_p > p)
3183 p = this_p;
3184 }
3185
1a2eb460 3186 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3187 if (v >= voltage_max)
3188 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3189
1a2eb460
KP
3190 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3191 if (p >= preemph_max)
3192 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3193
3194 for (lane = 0; lane < 4; lane++)
33a34e4e 3195 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3196}
3197
3198static uint32_t
f0a3424e 3199intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3200{
3cf2efb1 3201 uint32_t signal_levels = 0;
a4fc5ed6 3202
3cf2efb1 3203 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3204 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3205 default:
3206 signal_levels |= DP_VOLTAGE_0_4;
3207 break;
bd60018a 3208 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3209 signal_levels |= DP_VOLTAGE_0_6;
3210 break;
bd60018a 3211 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3212 signal_levels |= DP_VOLTAGE_0_8;
3213 break;
bd60018a 3214 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3215 signal_levels |= DP_VOLTAGE_1_2;
3216 break;
3217 }
3cf2efb1 3218 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3219 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3220 default:
3221 signal_levels |= DP_PRE_EMPHASIS_0;
3222 break;
bd60018a 3223 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3224 signal_levels |= DP_PRE_EMPHASIS_3_5;
3225 break;
bd60018a 3226 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3227 signal_levels |= DP_PRE_EMPHASIS_6;
3228 break;
bd60018a 3229 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3230 signal_levels |= DP_PRE_EMPHASIS_9_5;
3231 break;
3232 }
3233 return signal_levels;
3234}
3235
e3421a18
ZW
3236/* Gen6's DP voltage swing and pre-emphasis control */
3237static uint32_t
3238intel_gen6_edp_signal_levels(uint8_t train_set)
3239{
3c5a62b5
YL
3240 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3241 DP_TRAIN_PRE_EMPHASIS_MASK);
3242 switch (signal_levels) {
bd60018a
SJ
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3245 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3247 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3250 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3253 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3256 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3257 default:
3c5a62b5
YL
3258 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3259 "0x%x\n", signal_levels);
3260 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3261 }
3262}
3263
1a2eb460
KP
3264/* Gen7's DP voltage swing and pre-emphasis control */
3265static uint32_t
3266intel_gen7_edp_signal_levels(uint8_t train_set)
3267{
3268 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3269 DP_TRAIN_PRE_EMPHASIS_MASK);
3270 switch (signal_levels) {
bd60018a 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3272 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3274 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3276 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3277
bd60018a 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3279 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3281 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3282
bd60018a 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3284 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3286 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3287
3288 default:
3289 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3290 "0x%x\n", signal_levels);
3291 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3292 }
3293}
3294
d6c0d722
PZ
3295/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3296static uint32_t
f0a3424e 3297intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3298{
d6c0d722
PZ
3299 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3300 DP_TRAIN_PRE_EMPHASIS_MASK);
3301 switch (signal_levels) {
bd60018a 3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3303 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3305 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3307 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3308 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3309 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3310
bd60018a 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3312 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3314 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3316 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3317
bd60018a 3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3319 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3321 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3322
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3324 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3325 default:
3326 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3327 "0x%x\n", signal_levels);
c5fe6a06 3328 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3329 }
a4fc5ed6
KP
3330}
3331
f0a3424e
PZ
3332/* Properly updates "DP" with the correct signal levels. */
3333static void
3334intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3335{
3336 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3337 enum port port = intel_dig_port->port;
f0a3424e
PZ
3338 struct drm_device *dev = intel_dig_port->base.base.dev;
3339 uint32_t signal_levels, mask;
3340 uint8_t train_set = intel_dp->train_set[0];
3341
5a9d1f1a 3342 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3343 signal_levels = intel_hsw_signal_levels(train_set);
3344 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3345 } else if (IS_CHERRYVIEW(dev)) {
3346 signal_levels = intel_chv_signal_levels(intel_dp);
3347 mask = 0;
e2fa6fba
P
3348 } else if (IS_VALLEYVIEW(dev)) {
3349 signal_levels = intel_vlv_signal_levels(intel_dp);
3350 mask = 0;
bc7d38a4 3351 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3352 signal_levels = intel_gen7_edp_signal_levels(train_set);
3353 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3354 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3355 signal_levels = intel_gen6_edp_signal_levels(train_set);
3356 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3357 } else {
3358 signal_levels = intel_gen4_signal_levels(train_set);
3359 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3360 }
3361
3362 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3363
3364 *DP = (*DP & ~mask) | signal_levels;
3365}
3366
a4fc5ed6 3367static bool
ea5b213a 3368intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3369 uint32_t *DP,
58e10eb9 3370 uint8_t dp_train_pat)
a4fc5ed6 3371{
174edf1f
PZ
3372 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3373 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3374 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3375 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3376 int ret, len;
a4fc5ed6 3377
7b13b58a 3378 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3379
70aff66c 3380 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3381 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3382
2cdfe6c8
JN
3383 buf[0] = dp_train_pat;
3384 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3385 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3386 /* don't write DP_TRAINING_LANEx_SET on disable */
3387 len = 1;
3388 } else {
3389 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3390 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3391 len = intel_dp->lane_count + 1;
47ea7542 3392 }
a4fc5ed6 3393
9d1a1031
JN
3394 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3395 buf, len);
2cdfe6c8
JN
3396
3397 return ret == len;
a4fc5ed6
KP
3398}
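/*
 * Illustrative sketch (not part of the driver): the single burst above works
 * because DP_TRAINING_LANE0_SET..DP_TRAINING_LANE3_SET immediately follow
 * DP_TRAINING_PATTERN_SET in the DPCD address space, so the pattern and the
 * per-lane drive settings reach the sink together.  For a hypothetical
 * 4-lane link entering pattern 1 with scrambling disabled, that transfer
 * would be:
 */
static inline ssize_t example_write_tps1(struct intel_dp *intel_dp)
{
	uint8_t buf[5];

	buf[0] = DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE;
	memcpy(buf + 1, intel_dp->train_set, 4); /* lane 0..3 drive settings */

	return drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				 buf, sizeof(buf));
}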
3399
70aff66c
JN
3400static bool
3401intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3402 uint8_t dp_train_pat)
3403{
953d22e8 3404 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3405 intel_dp_set_signal_levels(intel_dp, DP);
3406 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3407}
3408
3409static bool
3410intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3411 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3412{
3413 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3414 struct drm_device *dev = intel_dig_port->base.base.dev;
3415 struct drm_i915_private *dev_priv = dev->dev_private;
3416 int ret;
3417
3418 intel_get_adjust_train(intel_dp, link_status);
3419 intel_dp_set_signal_levels(intel_dp, DP);
3420
3421 I915_WRITE(intel_dp->output_reg, *DP);
3422 POSTING_READ(intel_dp->output_reg);
3423
9d1a1031
JN
3424 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3425 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3426
3427 return ret == intel_dp->lane_count;
3428}
3429
3ab9c637
ID
3430static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3431{
3432 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3433 struct drm_device *dev = intel_dig_port->base.base.dev;
3434 struct drm_i915_private *dev_priv = dev->dev_private;
3435 enum port port = intel_dig_port->port;
3436 uint32_t val;
3437
3438 if (!HAS_DDI(dev))
3439 return;
3440
3441 val = I915_READ(DP_TP_CTL(port));
3442 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3443 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3444 I915_WRITE(DP_TP_CTL(port), val);
3445
3446 /*
3447 * On PORT_A we can have only eDP in SST mode. There the only reason
3448 * we need to set idle transmission mode is to work around a HW issue
3449 * where we enable the pipe while not in idle link-training mode.
3450 * In this case there is a requirement to wait for a minimum number of
3451 * idle patterns to be sent.
3452 */
3453 if (port == PORT_A)
3454 return;
3455
3456 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3457 1))
3458 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3459}
3460
33a34e4e 3461/* Enable corresponding port and start training pattern 1 */
c19b0669 3462void
33a34e4e 3463intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3464{
da63a9f2 3465 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3466 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3467 int i;
3468 uint8_t voltage;
cdb0e95b 3469 int voltage_tries, loop_tries;
ea5b213a 3470 uint32_t DP = intel_dp->DP;
6aba5b6c 3471 uint8_t link_config[2];
a4fc5ed6 3472
affa9354 3473 if (HAS_DDI(dev))
c19b0669
PZ
3474 intel_ddi_prepare_link_retrain(encoder);
3475
3cf2efb1 3476 /* Write the link configuration data */
6aba5b6c
JN
3477 link_config[0] = intel_dp->link_bw;
3478 link_config[1] = intel_dp->lane_count;
3479 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3480 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3481 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
a8f3ef61
SJ
3482 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
3483 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3484 &intel_dp->rate_select, 1);
6aba5b6c
JN
3485
3486 link_config[0] = 0;
3487 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3488 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3489
3490 DP |= DP_PORT_EN;
1a2eb460 3491
70aff66c
JN
3492 /* clock recovery */
3493 if (!intel_dp_reset_link_train(intel_dp, &DP,
3494 DP_TRAINING_PATTERN_1 |
3495 DP_LINK_SCRAMBLING_DISABLE)) {
3496 DRM_ERROR("failed to enable link training\n");
3497 return;
3498 }
3499
a4fc5ed6 3500 voltage = 0xff;
cdb0e95b
KP
3501 voltage_tries = 0;
3502 loop_tries = 0;
a4fc5ed6 3503 for (;;) {
70aff66c 3504 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3505
a7c9655f 3506 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3507 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3508 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3509 break;
93f62dad 3510 }
a4fc5ed6 3511
01916270 3512 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3513 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3514 break;
3515 }
3516
3517 /* Check to see if we've tried the max voltage */
3518 for (i = 0; i < intel_dp->lane_count; i++)
3519 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3520 break;
3b4f819d 3521 if (i == intel_dp->lane_count) {
b06fbda3
DV
3522 ++loop_tries;
3523 if (loop_tries == 5) {
3def84b3 3524 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3525 break;
3526 }
70aff66c
JN
3527 intel_dp_reset_link_train(intel_dp, &DP,
3528 DP_TRAINING_PATTERN_1 |
3529 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3530 voltage_tries = 0;
3531 continue;
3532 }
a4fc5ed6 3533
3cf2efb1 3534 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3535 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3536 ++voltage_tries;
b06fbda3 3537 if (voltage_tries == 5) {
3def84b3 3538 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3539 break;
3540 }
3541 } else
3542 voltage_tries = 0;
3543 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3544
70aff66c
JN
3545 /* Update training set as requested by target */
3546 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3547 DRM_ERROR("failed to update link training\n");
3548 break;
3549 }
a4fc5ed6
KP
3550 }
3551
33a34e4e
JB
3552 intel_dp->DP = DP;
3553}
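/*
 * Retry policy of the clock recovery loop above, restated for clarity (this
 * describes the code as written, it adds no behaviour): give up after five
 * full restarts (every lane reports DP_TRAIN_MAX_SWING_REACHED, so the
 * training pattern is re-applied from scratch) or after five consecutive
 * iterations stuck at the same voltage swing; otherwise fold the sink's
 * adjust requests back in via intel_dp_update_link_train() and try again.
 */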
3554
c19b0669 3555void
33a34e4e
JB
3556intel_dp_complete_link_train(struct intel_dp *intel_dp)
3557{
33a34e4e 3558 bool channel_eq = false;
37f80975 3559 int tries, cr_tries;
33a34e4e 3560 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3561 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3562
3563 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3564 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3565 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3566
a4fc5ed6 3567 /* channel equalization */
70aff66c 3568 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3569 training_pattern |
70aff66c
JN
3570 DP_LINK_SCRAMBLING_DISABLE)) {
3571 DRM_ERROR("failed to start channel equalization\n");
3572 return;
3573 }
3574
a4fc5ed6 3575 tries = 0;
37f80975 3576 cr_tries = 0;
a4fc5ed6
KP
3577 channel_eq = false;
3578 for (;;) {
70aff66c 3579 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3580
37f80975
JB
3581 if (cr_tries > 5) {
3582 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3583 break;
3584 }
3585
a7c9655f 3586 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3587 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3588 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3589 break;
70aff66c 3590 }
a4fc5ed6 3591
37f80975 3592 /* Make sure clock is still ok */
01916270 3593 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3594 intel_dp_start_link_train(intel_dp);
70aff66c 3595 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3596 training_pattern |
70aff66c 3597 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3598 cr_tries++;
3599 continue;
3600 }
3601
1ffdff13 3602 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3603 channel_eq = true;
3604 break;
3605 }
a4fc5ed6 3606
37f80975
JB
3607 /* Try 5 times, then try clock recovery if that fails */
3608 if (tries > 5) {
37f80975 3609 intel_dp_start_link_train(intel_dp);
70aff66c 3610 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3611 training_pattern |
70aff66c 3612 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3613 tries = 0;
3614 cr_tries++;
3615 continue;
3616 }
a4fc5ed6 3617
70aff66c
JN
3618 /* Update training set as requested by target */
3619 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3620 DRM_ERROR("failed to update link training\n");
3621 break;
3622 }
3cf2efb1 3623 ++tries;
869184a6 3624 }
3cf2efb1 3625
3ab9c637
ID
3626 intel_dp_set_idle_link_train(intel_dp);
3627
3628 intel_dp->DP = DP;
3629
d6c0d722 3630 if (channel_eq)
07f42258 3631 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3632
3ab9c637
ID
3633}
3634
3635void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3636{
70aff66c 3637 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3638 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3639}
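/*
 * Illustrative usage sketch: the three entry points above are used as a trio
 * by the retrain paths later in this file (clock recovery on TPS1, channel
 * equalization on TPS2/TPS3, then back to normal data):
 *
 *	intel_dp_start_link_train(intel_dp);
 *	intel_dp_complete_link_train(intel_dp);
 *	intel_dp_stop_link_train(intel_dp);
 */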
3640
3641static void
ea5b213a 3642intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3643{
da63a9f2 3644 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3645 enum port port = intel_dig_port->port;
da63a9f2 3646 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3647 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3648 uint32_t DP = intel_dp->DP;
a4fc5ed6 3649
bc76e320 3650 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3651 return;
3652
0c33d8d7 3653 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3654 return;
3655
28c97730 3656 DRM_DEBUG_KMS("\n");
32f9d658 3657
bc7d38a4 3658 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3659 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3660 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3661 } else {
aad3d14d
VS
3662 if (IS_CHERRYVIEW(dev))
3663 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3664 else
3665 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3666 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3667 }
fe255d00 3668 POSTING_READ(intel_dp->output_reg);
5eb08b69 3669
493a7081 3670 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3671 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3672 /* Hardware workaround: leaving our transcoder select
3673 * set to transcoder B while it's off will prevent the
3674 * corresponding HDMI output on transcoder A.
3675 *
3676 * Combine this with another hardware workaround:
3677 * transcoder select bit can only be cleared while the
3678 * port is enabled.
3679 */
3680 DP &= ~DP_PIPEB_SELECT;
3681 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3682 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3683 }
3684
832afda6 3685 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3686 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3687 POSTING_READ(intel_dp->output_reg);
f01eca2e 3688 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3689}
3690
26d61aad
KP
3691static bool
3692intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3693{
a031d709
RV
3694 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3695 struct drm_device *dev = dig_port->base.base.dev;
3696 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3697 uint8_t rev;
a031d709 3698
9d1a1031
JN
3699 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3700 sizeof(intel_dp->dpcd)) < 0)
edb39244 3701 return false; /* aux transfer failed */
92fd8fd1 3702
a8e98153 3703 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3704
edb39244
AJ
3705 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3706 return false; /* DPCD not present */
3707
2293bb5c
SK
3708 /* Check if the panel supports PSR */
3709 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3710 if (is_edp(intel_dp)) {
9d1a1031
JN
3711 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3712 intel_dp->psr_dpcd,
3713 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3714 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3715 dev_priv->psr.sink_support = true;
50003939 3716 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3717 }
50003939
JN
3718 }
3719
7809a611 3720 /* Training Pattern 3 support, both source and sink */
06ea66b6 3721 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3722 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3723 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3724 intel_dp->use_tps3 = true;
f8d8a672 3725 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3726 } else
3727 intel_dp->use_tps3 = false;
3728
fc0f8e25
SJ
3729 /* Intermediate frequency support */
3730 if (is_edp(intel_dp) &&
3731 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3732 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3733 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3734 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3735 int i;
3736
fc0f8e25
SJ
3737 intel_dp_dpcd_read_wake(&intel_dp->aux,
3738 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3739 supported_rates,
3740 sizeof(supported_rates));
3741
3742 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3743 int val = le16_to_cpu(supported_rates[i]);
3744
3745 if (val == 0)
3746 break;
3747
3748 intel_dp->supported_rates[i] = val * 200;
3749 }
3750 intel_dp->num_supported_rates = i;
fc0f8e25 3751 }
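/*
 * Worked example (illustrative): DP_SUPPORTED_LINK_RATES entries are
 * little-endian 16-bit values in units of 200 kHz of link clock and the
 * table is zero-terminated, hence the "* 200" and the break above.  A raw
 * value of 0x032a (810) decodes to 810 * 200 = 162000 kHz, i.e. RBR
 * (1.62 Gbps), and 0x0a8c (2700) decodes to 540000 kHz, i.e. HBR2.
 */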
edb39244
AJ
3752 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3753 DP_DWN_STRM_PORT_PRESENT))
3754 return true; /* native DP sink */
3755
3756 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3757 return true; /* no per-port downstream info */
3758
9d1a1031
JN
3759 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3760 intel_dp->downstream_ports,
3761 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3762 return false; /* downstream port status fetch failed */
3763
3764 return true;
92fd8fd1
KP
3765}
3766
0d198328
AJ
3767static void
3768intel_dp_probe_oui(struct intel_dp *intel_dp)
3769{
3770 u8 buf[3];
3771
3772 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3773 return;
3774
9d1a1031 3775 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3776 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3777 buf[0], buf[1], buf[2]);
3778
9d1a1031 3779 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3780 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3781 buf[0], buf[1], buf[2]);
3782}
3783
0e32b39c
DA
3784static bool
3785intel_dp_probe_mst(struct intel_dp *intel_dp)
3786{
3787 u8 buf[1];
3788
3789 if (!intel_dp->can_mst)
3790 return false;
3791
3792 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3793 return false;
3794
0e32b39c
DA
3795 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3796 if (buf[0] & DP_MST_CAP) {
3797 DRM_DEBUG_KMS("Sink is MST capable\n");
3798 intel_dp->is_mst = true;
3799 } else {
3800 DRM_DEBUG_KMS("Sink is not MST capable\n");
3801 intel_dp->is_mst = false;
3802 }
3803 }
0e32b39c
DA
3804
3805 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3806 return intel_dp->is_mst;
3807}
3808
d2e216d0
RV
3809int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3810{
3811 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3812 struct drm_device *dev = intel_dig_port->base.base.dev;
3813 struct intel_crtc *intel_crtc =
3814 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3815 u8 buf;
3816 int test_crc_count;
3817 int attempts = 6;
d2e216d0 3818
ad9dc91b 3819 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3820 return -EIO;
d2e216d0 3821
ad9dc91b 3822 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3823 return -ENOTTY;
3824
1dda5f93
RV
3825 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3826 return -EIO;
3827
9d1a1031 3828 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3829 buf | DP_TEST_SINK_START) < 0)
bda0381e 3830 return -EIO;
d2e216d0 3831
1dda5f93 3832 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3833 return -EIO;
ad9dc91b 3834 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3835
ad9dc91b 3836 do {
1dda5f93
RV
3837 if (drm_dp_dpcd_readb(&intel_dp->aux,
3838 DP_TEST_SINK_MISC, &buf) < 0)
3839 return -EIO;
ad9dc91b
RV
3840 intel_wait_for_vblank(dev, intel_crtc->pipe);
3841 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3842
3843 if (attempts == 0) {
90bd1f46
DV
3844 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3845 return -ETIMEDOUT;
ad9dc91b 3846 }
d2e216d0 3847
9d1a1031 3848 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3849 return -EIO;
d2e216d0 3850
1dda5f93
RV
3851 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3852 return -EIO;
3853 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3854 buf & ~DP_TEST_SINK_START) < 0)
3855 return -EIO;
ce31d9f4 3856
d2e216d0
RV
3857 return 0;
3858}
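/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the six bytes returned above are the sink's R/Cr, G/Y and B/Cb test CRCs
 * read back starting at DP_TEST_CRC_R_CR, typically dumped for display
 * validation:
 */
static inline void example_dump_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x %02x%02x %02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}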
3859
a60f0e38
JB
3860static bool
3861intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3862{
9d1a1031
JN
3863 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3864 DP_DEVICE_SERVICE_IRQ_VECTOR,
3865 sink_irq_vector, 1) == 1;
a60f0e38
JB
3866}
3867
0e32b39c
DA
3868static bool
3869intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3870{
3871 int ret;
3872
3873 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3874 DP_SINK_COUNT_ESI,
3875 sink_irq_vector, 14);
3876 if (ret != 14)
3877 return false;
3878
3879 return true;
3880}
3881
a60f0e38
JB
3882static void
3883intel_dp_handle_test_request(struct intel_dp *intel_dp)
3884{
3885 /* NAK by default */
9d1a1031 3886 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3887}
3888
0e32b39c
DA
3889static int
3890intel_dp_check_mst_status(struct intel_dp *intel_dp)
3891{
3892 bool bret;
3893
3894 if (intel_dp->is_mst) {
3895 u8 esi[16] = { 0 };
3896 int ret = 0;
3897 int retry;
3898 bool handled;
3899 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3900go_again:
3901 if (bret == true) {
3902
3903 /* check link status - esi[10] = 0x200c */
3904 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3905 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3906 intel_dp_start_link_train(intel_dp);
3907 intel_dp_complete_link_train(intel_dp);
3908 intel_dp_stop_link_train(intel_dp);
3909 }
3910
6f34cc39 3911 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3912 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3913
3914 if (handled) {
3915 for (retry = 0; retry < 3; retry++) {
3916 int wret;
3917 wret = drm_dp_dpcd_write(&intel_dp->aux,
3918 DP_SINK_COUNT_ESI+1,
3919 &esi[1], 3);
3920 if (wret == 3) {
3921 break;
3922 }
3923 }
3924
3925 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3926 if (bret == true) {
6f34cc39 3927 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3928 goto go_again;
3929 }
3930 } else
3931 ret = 0;
3932
3933 return ret;
3934 } else {
3935 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3936 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3937 intel_dp->is_mst = false;
3938 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3939 /* send a hotplug event */
3940 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3941 }
3942 }
3943 return -EINVAL;
3944}
3945
a4fc5ed6
KP
3946/*
3947 * According to DP spec
3948 * 5.1.2:
3949 * 1. Read DPCD
3950 * 2. Configure link according to Receiver Capabilities
3951 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3952 * 4. Check link status on receipt of hot-plug interrupt
3953 */
a5146200 3954static void
ea5b213a 3955intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3956{
5b215bcf 3957 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3958 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3959 u8 sink_irq_vector;
93f62dad 3960 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3961
5b215bcf
DA
3962 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3963
da63a9f2 3964 if (!intel_encoder->connectors_active)
d2b996ac 3965 return;
59cd09e1 3966
da63a9f2 3967 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3968 return;
3969
1a125d8a
ID
3970 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3971 return;
3972
92fd8fd1 3973 /* Try to read receiver status if the link appears to be up */
93f62dad 3974 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3975 return;
3976 }
3977
92fd8fd1 3978 /* Now read the DPCD to see if it's actually running */
26d61aad 3979 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3980 return;
3981 }
3982
a60f0e38
JB
3983 /* Try to read the source of the interrupt */
3984 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3985 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3986 /* Clear interrupt source */
9d1a1031
JN
3987 drm_dp_dpcd_writeb(&intel_dp->aux,
3988 DP_DEVICE_SERVICE_IRQ_VECTOR,
3989 sink_irq_vector);
a60f0e38
JB
3990
3991 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3992 intel_dp_handle_test_request(intel_dp);
3993 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3994 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3995 }
3996
1ffdff13 3997 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 3998 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 3999 intel_encoder->base.name);
33a34e4e
JB
4000 intel_dp_start_link_train(intel_dp);
4001 intel_dp_complete_link_train(intel_dp);
3ab9c637 4002 intel_dp_stop_link_train(intel_dp);
33a34e4e 4003 }
a4fc5ed6 4004}
a4fc5ed6 4005
caf9ab24 4006/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4007static enum drm_connector_status
26d61aad 4008intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4009{
caf9ab24 4010 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4011 uint8_t type;
4012
4013 if (!intel_dp_get_dpcd(intel_dp))
4014 return connector_status_disconnected;
4015
4016 /* if there's no downstream port, we're done */
4017 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4018 return connector_status_connected;
caf9ab24
AJ
4019
4020 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4021 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4022 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4023 uint8_t reg;
9d1a1031
JN
4024
4025 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4026 &reg, 1) < 0)
caf9ab24 4027 return connector_status_unknown;
9d1a1031 4028
23235177
AJ
4029 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4030 : connector_status_disconnected;
caf9ab24
AJ
4031 }
4032
4033 /* If no HPD, poke DDC gently */
0b99836f 4034 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4035 return connector_status_connected;
caf9ab24
AJ
4036
4037 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4038 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4039 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4040 if (type == DP_DS_PORT_TYPE_VGA ||
4041 type == DP_DS_PORT_TYPE_NON_EDID)
4042 return connector_status_unknown;
4043 } else {
4044 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4045 DP_DWN_STRM_PORT_TYPE_MASK;
4046 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4047 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4048 return connector_status_unknown;
4049 }
caf9ab24
AJ
4050
4051 /* Anything else is out of spec, warn and ignore */
4052 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4053 return connector_status_disconnected;
71ba9000
AJ
4054}
4055
d410b56d
CW
4056static enum drm_connector_status
4057edp_detect(struct intel_dp *intel_dp)
4058{
4059 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4060 enum drm_connector_status status;
4061
4062 status = intel_panel_detect(dev);
4063 if (status == connector_status_unknown)
4064 status = connector_status_connected;
4065
4066 return status;
4067}
4068
5eb08b69 4069static enum drm_connector_status
a9756bb5 4070ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4071{
30add22d 4072 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4073 struct drm_i915_private *dev_priv = dev->dev_private;
4074 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4075
1b469639
DL
4076 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4077 return connector_status_disconnected;
4078
26d61aad 4079 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4080}
4081
2a592bec
DA
4082static int g4x_digital_port_connected(struct drm_device *dev,
4083 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4084{
a4fc5ed6 4085 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4086 uint32_t bit;
5eb08b69 4087
232a6ee9
TP
4088 if (IS_VALLEYVIEW(dev)) {
4089 switch (intel_dig_port->port) {
4090 case PORT_B:
4091 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4092 break;
4093 case PORT_C:
4094 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4095 break;
4096 case PORT_D:
4097 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4098 break;
4099 default:
2a592bec 4100 return -EINVAL;
232a6ee9
TP
4101 }
4102 } else {
4103 switch (intel_dig_port->port) {
4104 case PORT_B:
4105 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4106 break;
4107 case PORT_C:
4108 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4109 break;
4110 case PORT_D:
4111 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4112 break;
4113 default:
2a592bec 4114 return -EINVAL;
232a6ee9 4115 }
a4fc5ed6
KP
4116 }
4117
10f76a38 4118 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4119 return 0;
4120 return 1;
4121}
4122
4123static enum drm_connector_status
4124g4x_dp_detect(struct intel_dp *intel_dp)
4125{
4126 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4127 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4128 int ret;
4129
4130 /* Can't disconnect eDP, but you can close the lid... */
4131 if (is_edp(intel_dp)) {
4132 enum drm_connector_status status;
4133
4134 status = intel_panel_detect(dev);
4135 if (status == connector_status_unknown)
4136 status = connector_status_connected;
4137 return status;
4138 }
4139
4140 ret = g4x_digital_port_connected(dev, intel_dig_port);
4141 if (ret == -EINVAL)
4142 return connector_status_unknown;
4143 else if (ret == 0)
a4fc5ed6
KP
4144 return connector_status_disconnected;
4145
26d61aad 4146 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4147}
4148
8c241fef 4149static struct edid *
beb60608 4150intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4151{
beb60608 4152 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4153
9cd300e0
JN
4154 /* use cached edid if we have one */
4155 if (intel_connector->edid) {
9cd300e0
JN
4156 /* invalid edid */
4157 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4158 return NULL;
4159
55e9edeb 4160 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4161 } else
4162 return drm_get_edid(&intel_connector->base,
4163 &intel_dp->aux.ddc);
4164}
8c241fef 4165
beb60608
CW
4166static void
4167intel_dp_set_edid(struct intel_dp *intel_dp)
4168{
4169 struct intel_connector *intel_connector = intel_dp->attached_connector;
4170 struct edid *edid;
8c241fef 4171
beb60608
CW
4172 edid = intel_dp_get_edid(intel_dp);
4173 intel_connector->detect_edid = edid;
4174
4175 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4176 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4177 else
4178 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4179}
4180
beb60608
CW
4181static void
4182intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4183{
beb60608 4184 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4185
beb60608
CW
4186 kfree(intel_connector->detect_edid);
4187 intel_connector->detect_edid = NULL;
9cd300e0 4188
beb60608
CW
4189 intel_dp->has_audio = false;
4190}
d6f24d0f 4191
beb60608
CW
4192static enum intel_display_power_domain
4193intel_dp_power_get(struct intel_dp *dp)
4194{
4195 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4196 enum intel_display_power_domain power_domain;
4197
4198 power_domain = intel_display_port_power_domain(encoder);
4199 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4200
4201 return power_domain;
4202}
d6f24d0f 4203
beb60608
CW
4204static void
4205intel_dp_power_put(struct intel_dp *dp,
4206 enum intel_display_power_domain power_domain)
4207{
4208 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4209 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4210}
4211
a9756bb5
ZW
4212static enum drm_connector_status
4213intel_dp_detect(struct drm_connector *connector, bool force)
4214{
4215 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4216 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4217 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4218 struct drm_device *dev = connector->dev;
a9756bb5 4219 enum drm_connector_status status;
671dedd2 4220 enum intel_display_power_domain power_domain;
0e32b39c 4221 bool ret;
a9756bb5 4222
164c8598 4223 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4224 connector->base.id, connector->name);
beb60608 4225 intel_dp_unset_edid(intel_dp);
164c8598 4226
0e32b39c
DA
4227 if (intel_dp->is_mst) {
4228 /* MST devices are disconnected from a monitor POV */
4229 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4230 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4231 return connector_status_disconnected;
0e32b39c
DA
4232 }
4233
beb60608 4234 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4235
d410b56d
CW
4236 /* Can't disconnect eDP, but you can close the lid... */
4237 if (is_edp(intel_dp))
4238 status = edp_detect(intel_dp);
4239 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4240 status = ironlake_dp_detect(intel_dp);
4241 else
4242 status = g4x_dp_detect(intel_dp);
4243 if (status != connector_status_connected)
c8c8fb33 4244 goto out;
a9756bb5 4245
0d198328
AJ
4246 intel_dp_probe_oui(intel_dp);
4247
0e32b39c
DA
4248 ret = intel_dp_probe_mst(intel_dp);
4249 if (ret) {
4250 /* if we are in MST mode then this connector
4251 won't appear connected or have anything with EDID on it */
4252 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4253 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4254 status = connector_status_disconnected;
4255 goto out;
4256 }
4257
beb60608 4258 intel_dp_set_edid(intel_dp);
a9756bb5 4259
d63885da
PZ
4260 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4261 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4262 status = connector_status_connected;
4263
4264out:
beb60608 4265 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4266 return status;
a4fc5ed6
KP
4267}
4268
beb60608
CW
4269static void
4270intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4271{
df0e9248 4272 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4273 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4274 enum intel_display_power_domain power_domain;
a4fc5ed6 4275
beb60608
CW
4276 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4277 connector->base.id, connector->name);
4278 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4279
beb60608
CW
4280 if (connector->status != connector_status_connected)
4281 return;
671dedd2 4282
beb60608
CW
4283 power_domain = intel_dp_power_get(intel_dp);
4284
4285 intel_dp_set_edid(intel_dp);
4286
4287 intel_dp_power_put(intel_dp, power_domain);
4288
4289 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4290 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4291}
4292
4293static int intel_dp_get_modes(struct drm_connector *connector)
4294{
4295 struct intel_connector *intel_connector = to_intel_connector(connector);
4296 struct edid *edid;
4297
4298 edid = intel_connector->detect_edid;
4299 if (edid) {
4300 int ret = intel_connector_update_modes(connector, edid);
4301 if (ret)
4302 return ret;
4303 }
32f9d658 4304
f8779fda 4305 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4306 if (is_edp(intel_attached_dp(connector)) &&
4307 intel_connector->panel.fixed_mode) {
f8779fda 4308 struct drm_display_mode *mode;
beb60608
CW
4309
4310 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4311 intel_connector->panel.fixed_mode);
f8779fda 4312 if (mode) {
32f9d658
ZW
4313 drm_mode_probed_add(connector, mode);
4314 return 1;
4315 }
4316 }
beb60608 4317
32f9d658 4318 return 0;
a4fc5ed6
KP
4319}
4320
1aad7ac0
CW
4321static bool
4322intel_dp_detect_audio(struct drm_connector *connector)
4323{
1aad7ac0 4324 bool has_audio = false;
beb60608 4325 struct edid *edid;
1aad7ac0 4326
beb60608
CW
4327 edid = to_intel_connector(connector)->detect_edid;
4328 if (edid)
1aad7ac0 4329 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4330
1aad7ac0
CW
4331 return has_audio;
4332}
4333
f684960e
CW
4334static int
4335intel_dp_set_property(struct drm_connector *connector,
4336 struct drm_property *property,
4337 uint64_t val)
4338{
e953fd7b 4339 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4340 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4341 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4342 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4343 int ret;
4344
662595df 4345 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4346 if (ret)
4347 return ret;
4348
3f43c48d 4349 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4350 int i = val;
4351 bool has_audio;
4352
4353 if (i == intel_dp->force_audio)
f684960e
CW
4354 return 0;
4355
1aad7ac0 4356 intel_dp->force_audio = i;
f684960e 4357
c3e5f67b 4358 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4359 has_audio = intel_dp_detect_audio(connector);
4360 else
c3e5f67b 4361 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4362
4363 if (has_audio == intel_dp->has_audio)
f684960e
CW
4364 return 0;
4365
1aad7ac0 4366 intel_dp->has_audio = has_audio;
f684960e
CW
4367 goto done;
4368 }
4369
e953fd7b 4370 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4371 bool old_auto = intel_dp->color_range_auto;
4372 uint32_t old_range = intel_dp->color_range;
4373
55bc60db
VS
4374 switch (val) {
4375 case INTEL_BROADCAST_RGB_AUTO:
4376 intel_dp->color_range_auto = true;
4377 break;
4378 case INTEL_BROADCAST_RGB_FULL:
4379 intel_dp->color_range_auto = false;
4380 intel_dp->color_range = 0;
4381 break;
4382 case INTEL_BROADCAST_RGB_LIMITED:
4383 intel_dp->color_range_auto = false;
4384 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4385 break;
4386 default:
4387 return -EINVAL;
4388 }
ae4edb80
DV
4389
4390 if (old_auto == intel_dp->color_range_auto &&
4391 old_range == intel_dp->color_range)
4392 return 0;
4393
e953fd7b
CW
4394 goto done;
4395 }
4396
53b41837
YN
4397 if (is_edp(intel_dp) &&
4398 property == connector->dev->mode_config.scaling_mode_property) {
4399 if (val == DRM_MODE_SCALE_NONE) {
4400 DRM_DEBUG_KMS("no scaling not supported\n");
4401 return -EINVAL;
4402 }
4403
4404 if (intel_connector->panel.fitting_mode == val) {
4405 /* the eDP scaling property is not changed */
4406 return 0;
4407 }
4408 intel_connector->panel.fitting_mode = val;
4409
4410 goto done;
4411 }
4412
f684960e
CW
4413 return -EINVAL;
4414
4415done:
c0c36b94
CW
4416 if (intel_encoder->base.crtc)
4417 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4418
4419 return 0;
4420}
4421
a4fc5ed6 4422static void
73845adf 4423intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4424{
1d508706 4425 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4426
10e972d3 4427 kfree(intel_connector->detect_edid);
beb60608 4428
9cd300e0
JN
4429 if (!IS_ERR_OR_NULL(intel_connector->edid))
4430 kfree(intel_connector->edid);
4431
acd8db10
PZ
4432 /* Can't call is_edp() since the encoder may have been destroyed
4433 * already. */
4434 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4435 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4436
a4fc5ed6 4437 drm_connector_cleanup(connector);
55f78c43 4438 kfree(connector);
a4fc5ed6
KP
4439}
4440
00c09d70 4441void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4442{
da63a9f2
PZ
4443 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4444 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4445
4f71d0cb 4446 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4447 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4448 if (is_edp(intel_dp)) {
4449 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4450 /*
4451 * vdd might still be enabled due to the delayed vdd off.
4452 * Make sure vdd is actually turned off here.
4453 */
773538e8 4454 pps_lock(intel_dp);
4be73780 4455 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4456 pps_unlock(intel_dp);
4457
01527b31
CT
4458 if (intel_dp->edp_notifier.notifier_call) {
4459 unregister_reboot_notifier(&intel_dp->edp_notifier);
4460 intel_dp->edp_notifier.notifier_call = NULL;
4461 }
bd943159 4462 }
c8bd0e49 4463 drm_encoder_cleanup(encoder);
da63a9f2 4464 kfree(intel_dig_port);
24d05927
DV
4465}
4466
07f9cd0b
ID
4467static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4468{
4469 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4470
4471 if (!is_edp(intel_dp))
4472 return;
4473
951468f3
VS
4474 /*
4475 * vdd might still be enabled due to the delayed vdd off.
4476 * Make sure vdd is actually turned off here.
4477 */
afa4e53a 4478 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4479 pps_lock(intel_dp);
07f9cd0b 4480 edp_panel_vdd_off_sync(intel_dp);
773538e8 4481 pps_unlock(intel_dp);
07f9cd0b
ID
4482}
4483
49e6bc51
VS
4484static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4485{
4486 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4487 struct drm_device *dev = intel_dig_port->base.base.dev;
4488 struct drm_i915_private *dev_priv = dev->dev_private;
4489 enum intel_display_power_domain power_domain;
4490
4491 lockdep_assert_held(&dev_priv->pps_mutex);
4492
4493 if (!edp_have_panel_vdd(intel_dp))
4494 return;
4495
4496 /*
4497 * The VDD bit needs a power domain reference, so if the bit is
4498 * already enabled when we boot or resume, grab this reference and
4499 * schedule a vdd off, so we don't hold on to the reference
4500 * indefinitely.
4501 */
4502 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4503 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4504 intel_display_power_get(dev_priv, power_domain);
4505
4506 edp_panel_vdd_schedule_off(intel_dp);
4507}
4508
6d93c0c4
ID
4509static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4510{
49e6bc51
VS
4511 struct intel_dp *intel_dp;
4512
4513 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4514 return;
4515
4516 intel_dp = enc_to_intel_dp(encoder);
4517
4518 pps_lock(intel_dp);
4519
4520 /*
4521 * Read out the current power sequencer assignment,
4522 * in case the BIOS did something with it.
4523 */
4524 if (IS_VALLEYVIEW(encoder->dev))
4525 vlv_initial_power_sequencer_setup(intel_dp);
4526
4527 intel_edp_panel_vdd_sanitize(intel_dp);
4528
4529 pps_unlock(intel_dp);
6d93c0c4
ID
4530}
4531
a4fc5ed6 4532static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4533 .dpms = intel_connector_dpms,
a4fc5ed6 4534 .detect = intel_dp_detect,
beb60608 4535 .force = intel_dp_force,
a4fc5ed6 4536 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4537 .set_property = intel_dp_set_property,
2545e4a6 4538 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4539 .destroy = intel_dp_connector_destroy,
c6f95f27 4540 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4541};
4542
4543static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4544 .get_modes = intel_dp_get_modes,
4545 .mode_valid = intel_dp_mode_valid,
df0e9248 4546 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4547};
4548
a4fc5ed6 4549static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4550 .reset = intel_dp_encoder_reset,
24d05927 4551 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4552};
4553
0e32b39c 4554void
21d40d37 4555intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4556{
0e32b39c 4557 return;
c8110e52 4558}
6207937d 4559
b2c5c181 4560enum irqreturn
13cf5504
DA
4561intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4562{
4563 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4564 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4565 struct drm_device *dev = intel_dig_port->base.base.dev;
4566 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4567 enum intel_display_power_domain power_domain;
b2c5c181 4568 enum irqreturn ret = IRQ_NONE;
1c767b33 4569
0e32b39c
DA
4570 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4571 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4572
7a7f84cc
VS
4573 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4574 /*
4575 * vdd off can generate a long pulse on eDP which
4576 * would require vdd on to handle it, and thus we
4577 * would end up in an endless cycle of
4578 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4579 */
4580 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4581 port_name(intel_dig_port->port));
a8b3d52f 4582 return IRQ_HANDLED;
7a7f84cc
VS
4583 }
4584
26fbb774
VS
4585 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4586 port_name(intel_dig_port->port),
0e32b39c 4587 long_hpd ? "long" : "short");
13cf5504 4588
1c767b33
ID
4589 power_domain = intel_display_port_power_domain(intel_encoder);
4590 intel_display_power_get(dev_priv, power_domain);
4591
0e32b39c 4592 if (long_hpd) {
2a592bec
DA
4593
4594 if (HAS_PCH_SPLIT(dev)) {
4595 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4596 goto mst_fail;
4597 } else {
4598 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4599 goto mst_fail;
4600 }
0e32b39c
DA
4601
4602 if (!intel_dp_get_dpcd(intel_dp)) {
4603 goto mst_fail;
4604 }
4605
4606 intel_dp_probe_oui(intel_dp);
4607
4608 if (!intel_dp_probe_mst(intel_dp))
4609 goto mst_fail;
4610
4611 } else {
4612 if (intel_dp->is_mst) {
1c767b33 4613 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4614 goto mst_fail;
4615 }
4616
4617 if (!intel_dp->is_mst) {
4618 /*
4619 * we'll check the link status via the normal hot plug path later -
4620 * but for short hpds we should check it now
4621 */
5b215bcf 4622 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4623 intel_dp_check_link_status(intel_dp);
5b215bcf 4624 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4625 }
4626 }
b2c5c181
DV
4627
4628 ret = IRQ_HANDLED;
4629
1c767b33 4630 goto put_power;
0e32b39c
DA
4631mst_fail:
4632 /* if we were in MST mode, and device is not there get out of MST mode */
4633 if (intel_dp->is_mst) {
4634 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4635 intel_dp->is_mst = false;
4636 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4637 }
1c767b33
ID
4638put_power:
4639 intel_display_power_put(dev_priv, power_domain);
4640
4641 return ret;
13cf5504
DA
4642}
4643
e3421a18
ZW
4644/* Return which DP Port should be selected for Transcoder DP control */
4645int
0206e353 4646intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4647{
4648 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4649 struct intel_encoder *intel_encoder;
4650 struct intel_dp *intel_dp;
e3421a18 4651
fa90ecef
PZ
4652 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4653 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4654
fa90ecef
PZ
4655 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4656 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4657 return intel_dp->output_reg;
e3421a18 4658 }
ea5b213a 4659
e3421a18
ZW
4660 return -1;
4661}
4662
36e83a18 4663/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4664bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4665{
4666 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4667 union child_device_config *p_child;
36e83a18 4668 int i;
5d8a7752
VS
4669 static const short port_mapping[] = {
4670 [PORT_B] = PORT_IDPB,
4671 [PORT_C] = PORT_IDPC,
4672 [PORT_D] = PORT_IDPD,
4673 };
36e83a18 4674
3b32a35b
VS
4675 if (port == PORT_A)
4676 return true;
4677
41aa3448 4678 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4679 return false;
4680
41aa3448
RV
4681 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4682 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4683
5d8a7752 4684 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4685 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4686 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4687 return true;
4688 }
4689 return false;
4690}
4691
0e32b39c 4692void
f684960e
CW
4693intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4694{
53b41837
YN
4695 struct intel_connector *intel_connector = to_intel_connector(connector);
4696
3f43c48d 4697 intel_attach_force_audio_property(connector);
e953fd7b 4698 intel_attach_broadcast_rgb_property(connector);
55bc60db 4699 intel_dp->color_range_auto = true;
53b41837
YN
4700
4701 if (is_edp(intel_dp)) {
4702 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4703 drm_object_attach_property(
4704 &connector->base,
53b41837 4705 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4706 DRM_MODE_SCALE_ASPECT);
4707 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4708 }
f684960e
CW
4709}
4710
dada1a9f
ID
4711static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4712{
4713 intel_dp->last_power_cycle = jiffies;
4714 intel_dp->last_power_on = jiffies;
4715 intel_dp->last_backlight_off = jiffies;
4716}
4717
67a54566
DV
4718static void
4719intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4720 struct intel_dp *intel_dp)
67a54566
DV
4721{
4722 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4723 struct edp_power_seq cur, vbt, spec,
4724 *final = &intel_dp->pps_delays;
67a54566 4725 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4726 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4727
e39b999a
VS
4728 lockdep_assert_held(&dev_priv->pps_mutex);
4729
81ddbc69
VS
4730 /* already initialized? */
4731 if (final->t11_t12 != 0)
4732 return;
4733
453c5420 4734 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4735 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4736 pp_on_reg = PCH_PP_ON_DELAYS;
4737 pp_off_reg = PCH_PP_OFF_DELAYS;
4738 pp_div_reg = PCH_PP_DIVISOR;
4739 } else {
bf13e81b
JN
4740 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4741
4742 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4743 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4744 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4745 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4746 }
67a54566
DV
4747
4748 /* Workaround: Need to write PP_CONTROL with the unlock key as
4749 * the very first thing. */
453c5420 4750 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4751 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4752
453c5420
JB
4753 pp_on = I915_READ(pp_on_reg);
4754 pp_off = I915_READ(pp_off_reg);
4755 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4756
4757 /* Pull timing values out of registers */
4758 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4759 PANEL_POWER_UP_DELAY_SHIFT;
4760
4761 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4762 PANEL_LIGHT_ON_DELAY_SHIFT;
4763
4764 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4765 PANEL_LIGHT_OFF_DELAY_SHIFT;
4766
4767 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4768 PANEL_POWER_DOWN_DELAY_SHIFT;
4769
4770 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4771 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4772
4773 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4774 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4775
41aa3448 4776 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4777
4778 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4779 * our hw here, which are all in 100usec. */
4780 spec.t1_t3 = 210 * 10;
4781 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4782 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4783 spec.t10 = 500 * 10;
4784 /* This one is special and actually in units of 100ms, but zero
4785 * based in the hw (so we need to add 100 ms). But the sw vbt
4786 * table multiplies it with 1000 to make it in units of 100usec,
4787 * too. */
4788 spec.t11_t12 = (510 + 100) * 10;
4789
4790 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4791 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4792
4793 /* Use the max of the register settings and vbt. If both are
4794 * unset, fall back to the spec limits. */
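	/* E.g. with cur.t1_t3 == 0 and vbt.t1_t3 == 0 this picks the eDP
	 * spec limit of 2100 (210 ms in the hw's 100usec units). */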
36b5f425 4795#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4796 spec.field : \
4797 max(cur.field, vbt.field))
4798 assign_final(t1_t3);
4799 assign_final(t8);
4800 assign_final(t9);
4801 assign_final(t10);
4802 assign_final(t11_t12);
4803#undef assign_final
4804
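	/* final->field is in 100usec units; DIV_ROUND_UP(field, 10) converts
	 * it to the millisecond values stored in intel_dp. */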
36b5f425 4805#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4806 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4807 intel_dp->backlight_on_delay = get_delay(t8);
4808 intel_dp->backlight_off_delay = get_delay(t9);
4809 intel_dp->panel_power_down_delay = get_delay(t10);
4810 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4811#undef get_delay
4812
f30d26e4
JN
4813 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4814 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4815 intel_dp->panel_power_cycle_delay);
4816
4817 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4818 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4819}
4820
4821static void
4822intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4823 struct intel_dp *intel_dp)
f30d26e4
JN
4824{
4825 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4826 u32 pp_on, pp_off, pp_div, port_sel = 0;
4827 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4828 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4829 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4830 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4831
e39b999a 4832 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4833
4834 if (HAS_PCH_SPLIT(dev)) {
4835 pp_on_reg = PCH_PP_ON_DELAYS;
4836 pp_off_reg = PCH_PP_OFF_DELAYS;
4837 pp_div_reg = PCH_PP_DIVISOR;
4838 } else {
bf13e81b
JN
4839 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4840
4841 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4842 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4843 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4844 }
4845
b2f19d1a
PZ
4846 /*
4847 * And finally store the new values in the power sequencer. The
4848 * backlight delays are set to 1 because we do manual waits on them. For
4849 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4850 * we'll end up waiting for the backlight off delay twice: once when we
4851 * do the manual sleep, and once when we disable the panel and wait for
4852 * the PP_STATUS bit to become zero.
4853 */
f30d26e4 4854 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4855 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4856 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4857 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4858	/* Compute the divisor for the pp clock; it simply matches the
4859	 * Bspec formula. */
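	/* seq->t11_t12 is kept in 100usec units while the power cycle delay
	 * field in the PP divisor register takes 100ms units, hence the
	 * DIV_ROUND_UP by 1000 below. */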
453c5420 4860 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4861 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4862 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4863
4864 /* Haswell doesn't have any port selection bits for the panel
4865 * power sequencer any more. */
bc7d38a4 4866 if (IS_VALLEYVIEW(dev)) {
ad933b56 4867 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4868 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4869 if (port == PORT_A)
a24c144c 4870 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4871 else
a24c144c 4872 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4873 }
4874
453c5420
JB
4875 pp_on |= port_sel;
4876
4877 I915_WRITE(pp_on_reg, pp_on);
4878 I915_WRITE(pp_off_reg, pp_off);
4879 I915_WRITE(pp_div_reg, pp_div);
67a54566 4880
67a54566 4881 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4882 I915_READ(pp_on_reg),
4883 I915_READ(pp_off_reg),
4884 I915_READ(pp_div_reg));
f684960e
CW
4885}
4886
b33a2815
VK
4887/**
4888 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4889 * @dev: DRM device
4890 * @refresh_rate: RR to be programmed
4891 *
4892 * This function gets called when the refresh rate (RR) has to be changed
4893 * from one frequency to another. The switch can be between the high and
4894 * low RR supported by the panel, or to any other RR based on media
4895 * playback (in which case the RR value needs to be passed from user space).
4896 *
4897 * The caller of this function needs to hold dev_priv->drrs.mutex.
4898 */
96178eeb 4899static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4900{
4901 struct drm_i915_private *dev_priv = dev->dev_private;
4902 struct intel_encoder *encoder;
96178eeb
VK
4903 struct intel_digital_port *dig_port = NULL;
4904 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4905 struct intel_crtc_state *config = NULL;
439d7ac0 4906 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4907 u32 reg, val;
96178eeb 4908 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4909
4910 if (refresh_rate <= 0) {
4911 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4912 return;
4913 }
4914
96178eeb
VK
4915 if (intel_dp == NULL) {
4916 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4917 return;
4918 }
4919
1fcc9d1c 4920 /*
e4d59f6b
RV
4921 * FIXME: This needs proper synchronization with psr state for some
4922 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4923 */
439d7ac0 4924
96178eeb
VK
4925 dig_port = dp_to_dig_port(intel_dp);
4926 encoder = &dig_port->base;
439d7ac0
PB
4927 intel_crtc = encoder->new_crtc;
4928
4929 if (!intel_crtc) {
4930 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4931 return;
4932 }
4933
6e3c9717 4934 config = intel_crtc->config;
439d7ac0 4935
96178eeb 4936 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4937 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4938 return;
4939 }
4940
96178eeb
VK
4941 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4942 refresh_rate)
439d7ac0
PB
4943 index = DRRS_LOW_RR;
4944
96178eeb 4945 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4946 DRM_DEBUG_KMS(
4947 "DRRS requested for previously set RR...ignoring\n");
4948 return;
4949 }
4950
4951 if (!intel_crtc->active) {
4952 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4953 return;
4954 }
4955
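	/*
	 * gen8+ (except CHV) switch the refresh rate by reprogramming the
	 * link M/N values (M1/N1 vs. M2/N2); gen7 and CHV toggle the eDP
	 * RR mode switch bit in PIPECONF instead.
	 */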
44395bfe 4956 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4957 switch (index) {
4958 case DRRS_HIGH_RR:
4959 intel_dp_set_m_n(intel_crtc, M1_N1);
4960 break;
4961 case DRRS_LOW_RR:
4962 intel_dp_set_m_n(intel_crtc, M2_N2);
4963 break;
4964 case DRRS_MAX_RR:
4965 default:
4966			DRM_ERROR("Unsupported refresh rate type\n");
4967 }
4968 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4969 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4970 val = I915_READ(reg);
a4c30b1d 4971
439d7ac0 4972 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4973 if (IS_VALLEYVIEW(dev))
4974 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4975 else
4976 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4977 } else {
6fa7aec1
VK
4978 if (IS_VALLEYVIEW(dev))
4979 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4980 else
4981 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
4982 }
4983 I915_WRITE(reg, val);
4984 }
4985
4e9ac947
VK
4986 dev_priv->drrs.refresh_rate_type = index;
4987
4988 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4989}
4990
b33a2815
VK
4991/**
4992 * intel_edp_drrs_enable - init drrs struct if supported
4993 * @intel_dp: DP struct
4994 *
4995 * Initializes frontbuffer_bits and drrs.dp
4996 */
c395578e
VK
4997void intel_edp_drrs_enable(struct intel_dp *intel_dp)
4998{
4999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5000 struct drm_i915_private *dev_priv = dev->dev_private;
5001 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5002 struct drm_crtc *crtc = dig_port->base.base.crtc;
5003 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5004
5005 if (!intel_crtc->config->has_drrs) {
5006 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5007 return;
5008 }
5009
5010 mutex_lock(&dev_priv->drrs.mutex);
5011 if (WARN_ON(dev_priv->drrs.dp)) {
5012 DRM_ERROR("DRRS already enabled\n");
5013 goto unlock;
5014 }
5015
5016 dev_priv->drrs.busy_frontbuffer_bits = 0;
5017
5018 dev_priv->drrs.dp = intel_dp;
5019
5020unlock:
5021 mutex_unlock(&dev_priv->drrs.mutex);
5022}
5023
b33a2815
VK
5024/**
5025 * intel_edp_drrs_disable - Disable DRRS
5026 * @intel_dp: DP struct
5027 *
5028 */
c395578e
VK
5029void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5030{
5031 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5032 struct drm_i915_private *dev_priv = dev->dev_private;
5033 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5034 struct drm_crtc *crtc = dig_port->base.base.crtc;
5035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5036
5037 if (!intel_crtc->config->has_drrs)
5038 return;
5039
5040 mutex_lock(&dev_priv->drrs.mutex);
5041 if (!dev_priv->drrs.dp) {
5042 mutex_unlock(&dev_priv->drrs.mutex);
5043 return;
5044 }
5045
5046 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5047 intel_dp_set_drrs_state(dev_priv->dev,
5048 intel_dp->attached_connector->panel.
5049 fixed_mode->vrefresh);
5050
5051 dev_priv->drrs.dp = NULL;
5052 mutex_unlock(&dev_priv->drrs.mutex);
5053
5054 cancel_delayed_work_sync(&dev_priv->drrs.work);
5055}
5056
4e9ac947
VK
5057static void intel_edp_drrs_downclock_work(struct work_struct *work)
5058{
5059 struct drm_i915_private *dev_priv =
5060 container_of(work, typeof(*dev_priv), drrs.work.work);
5061 struct intel_dp *intel_dp;
5062
5063 mutex_lock(&dev_priv->drrs.mutex);
5064
5065 intel_dp = dev_priv->drrs.dp;
5066
5067 if (!intel_dp)
5068 goto unlock;
5069
439d7ac0 5070 /*
4e9ac947
VK
5071	 * The delayed work can race with an invalidate, hence we need to
5072 * recheck.
439d7ac0
PB
5073 */
5074
4e9ac947
VK
5075 if (dev_priv->drrs.busy_frontbuffer_bits)
5076 goto unlock;
439d7ac0 5077
4e9ac947
VK
5078 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5079 intel_dp_set_drrs_state(dev_priv->dev,
5080 intel_dp->attached_connector->panel.
5081 downclock_mode->vrefresh);
439d7ac0 5082
4e9ac947 5083unlock:
439d7ac0 5084
4e9ac947 5085 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5086}
5087
b33a2815
VK
5088/**
5089 * intel_edp_drrs_invalidate - Invalidate DRRS
5090 * @dev: DRM device
5091 * @frontbuffer_bits: frontbuffer plane tracking bits
5092 *
5093 * When there is a disturbance on screen (due to cursor movement/time
5094 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5095 * high RR.
5096 *
5097 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5098 */
a93fad0f
VK
5099void intel_edp_drrs_invalidate(struct drm_device *dev,
5100 unsigned frontbuffer_bits)
5101{
5102 struct drm_i915_private *dev_priv = dev->dev_private;
5103 struct drm_crtc *crtc;
5104 enum pipe pipe;
5105
5106 if (!dev_priv->drrs.dp)
5107 return;
5108
3954e733
R
5109 cancel_delayed_work_sync(&dev_priv->drrs.work);
5110
a93fad0f
VK
5111 mutex_lock(&dev_priv->drrs.mutex);
5112 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5113 pipe = to_intel_crtc(crtc)->pipe;
5114
5115 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5116 intel_dp_set_drrs_state(dev_priv->dev,
5117 dev_priv->drrs.dp->attached_connector->panel.
5118 fixed_mode->vrefresh);
5119 }
5120
5121 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5122
5123 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5124 mutex_unlock(&dev_priv->drrs.mutex);
5125}
5126
b33a2815
VK
5127/**
5128 * intel_edp_drrs_flush - Flush DRRS
5129 * @dev: DRM device
5130 * @frontbuffer_bits: frontbuffer plane tracking bits
5131 *
5132 * When there is no movement on screen, DRRS work can be scheduled.
5133 * This DRRS work is responsible for setting relevant registers after a
5134 * timeout of 1 second.
5135 *
5136 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5137 */
a93fad0f
VK
5138void intel_edp_drrs_flush(struct drm_device *dev,
5139 unsigned frontbuffer_bits)
5140{
5141 struct drm_i915_private *dev_priv = dev->dev_private;
5142 struct drm_crtc *crtc;
5143 enum pipe pipe;
5144
5145 if (!dev_priv->drrs.dp)
5146 return;
5147
3954e733
R
5148 cancel_delayed_work_sync(&dev_priv->drrs.work);
5149
a93fad0f
VK
5150 mutex_lock(&dev_priv->drrs.mutex);
5151 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5152 pipe = to_intel_crtc(crtc)->pipe;
5153 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5154
a93fad0f
VK
5155 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5156 !dev_priv->drrs.busy_frontbuffer_bits)
5157 schedule_delayed_work(&dev_priv->drrs.work,
5158 msecs_to_jiffies(1000));
5159 mutex_unlock(&dev_priv->drrs.mutex);
5160}
5161
b33a2815
VK
5162/**
5163 * DOC: Display Refresh Rate Switching (DRRS)
5164 *
5165 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5166 * which enables switching between low and high refresh rates
5167 * dynamically, based on the usage scenario. This feature is applicable
5168 * to internal panels.
5169 *
5170 * Indication that the panel supports DRRS is given by the panel EDID, which
5171 * would list multiple refresh rates for one resolution.
5172 *
5173 * DRRS is of 2 types - static and seamless.
5174 * Static DRRS involves changing the refresh rate (RR) with a full modeset
5175 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5176 * Seamless DRRS involves changing RR without any visual effect to the user
5177 * and can be used during normal system usage. This is done by programming
5178 * certain registers.
5179 *
5180 * Support for static/seamless DRRS may be indicated in the VBT based on
5181 * inputs from the panel spec.
5182 *
5183 * DRRS saves power by switching to low RR based on usage scenarios.
5184 *
5185 * eDP DRRS:
5186 * The implementation is based on the frontbuffer tracking implementation.
5187 * When there is a disturbance on the screen triggered by user activity or a
5188 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5189 * When there is no movement on screen, after a timeout of 1 second, a switch
5190 * to low RR is made.
5191 * For integration with frontbuffer tracking code,
5192 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5193 *
5194 * DRRS can be further extended to support other internal panels and also
5195 * the scenario of video playback wherein RR is set based on the rate
5196 * requested by userspace.
5197 */
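/*
 * Illustrative call flow (a sketch only, not driver code): the frontbuffer
 * tracking code is expected to drive DRRS roughly as follows:
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);  <- screen dirty: stay at high RR
 *	...rendering / flips...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);       <- idle: low RR after ~1s timeout
 */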
5198
5199/**
5200 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5201 * @intel_connector: eDP connector
5202 * @fixed_mode: preferred mode of panel
5203 *
5204 * This function is called only once at driver load to initialize basic
5205 * DRRS stuff.
5206 *
5207 * Returns:
5208 * Downclock mode if panel supports it, else return NULL.
5209 * DRRS support is determined by the presence of downclock mode (apart
5210 * from VBT setting).
5211 */
4f9db5b5 5212static struct drm_display_mode *
96178eeb
VK
5213intel_dp_drrs_init(struct intel_connector *intel_connector,
5214 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5215{
5216 struct drm_connector *connector = &intel_connector->base;
96178eeb 5217 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5218 struct drm_i915_private *dev_priv = dev->dev_private;
5219 struct drm_display_mode *downclock_mode = NULL;
5220
5221 if (INTEL_INFO(dev)->gen <= 6) {
5222 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5223 return NULL;
5224 }
5225
5226 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5227 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5228 return NULL;
5229 }
5230
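	/*
	 * intel_find_panel_downclock() scans the probed modes for a
	 * lower-clock variant of the fixed mode; that mode becomes the DRRS
	 * low refresh rate mode.
	 */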
5231 downclock_mode = intel_find_panel_downclock
5232 (dev, fixed_mode, connector);
5233
5234 if (!downclock_mode) {
a1d26342 5235 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5236 return NULL;
5237 }
5238
4e9ac947
VK
5239 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5240
96178eeb 5241 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5242
96178eeb 5243 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5244
96178eeb 5245 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5246 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5247 return downclock_mode;
5248}
5249
ed92f0b2 5250static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5251 struct intel_connector *intel_connector)
ed92f0b2
PZ
5252{
5253 struct drm_connector *connector = &intel_connector->base;
5254 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5255 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5256 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5257 struct drm_i915_private *dev_priv = dev->dev_private;
5258 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5259 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5260 bool has_dpcd;
5261 struct drm_display_mode *scan;
5262 struct edid *edid;
6517d273 5263 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5264
96178eeb 5265 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5266
ed92f0b2
PZ
5267 if (!is_edp(intel_dp))
5268 return true;
5269
49e6bc51
VS
5270 pps_lock(intel_dp);
5271 intel_edp_panel_vdd_sanitize(intel_dp);
5272 pps_unlock(intel_dp);
63635217 5273
ed92f0b2 5274 /* Cache DPCD and EDID for edp. */
ed92f0b2 5275 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5276
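	/*
	 * DPCD 1.1+ sinks advertise in DP_MAX_DOWNSPREAD whether link
	 * training can be done without the AUX handshake.
	 */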
5277 if (has_dpcd) {
5278 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5279 dev_priv->no_aux_handshake =
5280 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5281 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5282 } else {
5283 /* if this fails, presume the device is a ghost */
5284 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5285 return false;
5286 }
5287
5288 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5289 pps_lock(intel_dp);
36b5f425 5290 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5291 pps_unlock(intel_dp);
ed92f0b2 5292
060c8778 5293 mutex_lock(&dev->mode_config.mutex);
0b99836f 5294 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5295 if (edid) {
5296 if (drm_add_edid_modes(connector, edid)) {
5297 drm_mode_connector_update_edid_property(connector,
5298 edid);
5299 drm_edid_to_eld(connector, edid);
5300 } else {
5301 kfree(edid);
5302 edid = ERR_PTR(-EINVAL);
5303 }
5304 } else {
5305 edid = ERR_PTR(-ENOENT);
5306 }
5307 intel_connector->edid = edid;
5308
5309 /* prefer fixed mode from EDID if available */
5310 list_for_each_entry(scan, &connector->probed_modes, head) {
5311 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5312 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5313 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5314 intel_connector, fixed_mode);
ed92f0b2
PZ
5315 break;
5316 }
5317 }
5318
5319 /* fallback to VBT if available for eDP */
5320 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5321 fixed_mode = drm_mode_duplicate(dev,
5322 dev_priv->vbt.lfp_lvds_vbt_mode);
5323 if (fixed_mode)
5324 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5325 }
060c8778 5326 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5327
01527b31
CT
5328 if (IS_VALLEYVIEW(dev)) {
5329 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5330 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5331
5332 /*
5333 * Figure out the current pipe for the initial backlight setup.
5334 * If the current pipe isn't valid, try the PPS pipe, and if that
5335 * fails just assume pipe A.
5336 */
5337 if (IS_CHERRYVIEW(dev))
5338 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5339 else
5340 pipe = PORT_TO_PIPE(intel_dp->DP);
5341
5342 if (pipe != PIPE_A && pipe != PIPE_B)
5343 pipe = intel_dp->pps_pipe;
5344
5345 if (pipe != PIPE_A && pipe != PIPE_B)
5346 pipe = PIPE_A;
5347
5348 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5349 pipe_name(pipe));
01527b31
CT
5350 }
5351
4f9db5b5 5352 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5353 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5354 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5355
5356 return true;
5357}
5358
16c25533 5359bool
f0fec3f2
PZ
5360intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5361 struct intel_connector *intel_connector)
a4fc5ed6 5362{
f0fec3f2
PZ
5363 struct drm_connector *connector = &intel_connector->base;
5364 struct intel_dp *intel_dp = &intel_dig_port->dp;
5365 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5366 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5367 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5368 enum port port = intel_dig_port->port;
0b99836f 5369 int type;
a4fc5ed6 5370
a4a5d2f8
VS
5371 intel_dp->pps_pipe = INVALID_PIPE;
5372
ec5b01dd 5373 /* intel_dp vfuncs */
b6b5e383
DL
5374 if (INTEL_INFO(dev)->gen >= 9)
5375 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5376 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5377 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5378 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5379 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5380 else if (HAS_PCH_SPLIT(dev))
5381 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5382 else
5383 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5384
b9ca5fad
DL
5385 if (INTEL_INFO(dev)->gen >= 9)
5386 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5387 else
5388 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5389
0767935e
DV
5390 /* Preserve the current hw state. */
5391 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5392 intel_dp->attached_connector = intel_connector;
3d3dc149 5393
3b32a35b 5394 if (intel_dp_is_edp(dev, port))
b329530c 5395 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5396 else
5397 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5398
f7d24902
ID
5399 /*
5400 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5401 * for DP the encoder type can be set by the caller to
5402 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5403 */
5404 if (type == DRM_MODE_CONNECTOR_eDP)
5405 intel_encoder->type = INTEL_OUTPUT_EDP;
5406
c17ed5b5
VS
5407 /* eDP only on port B and/or C on vlv/chv */
5408 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5409 port != PORT_B && port != PORT_C))
5410 return false;
5411
e7281eab
ID
5412 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5413 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5414 port_name(port));
5415
b329530c 5416 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5417 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5418
a4fc5ed6
KP
5419 connector->interlace_allowed = true;
5420 connector->doublescan_allowed = 0;
5421
f0fec3f2 5422 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5423 edp_panel_vdd_work);
a4fc5ed6 5424
df0e9248 5425 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5426 drm_connector_register(connector);
a4fc5ed6 5427
affa9354 5428 if (HAS_DDI(dev))
bcbc889b
PZ
5429 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5430 else
5431 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5432 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5433
0b99836f 5434 /* Set up the hotplug pin. */
ab9d7c30
PZ
5435 switch (port) {
5436 case PORT_A:
1d843f9d 5437 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5438 break;
5439 case PORT_B:
1d843f9d 5440 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5441 break;
5442 case PORT_C:
1d843f9d 5443 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5444 break;
5445 case PORT_D:
1d843f9d 5446 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5447 break;
5448 default:
ad1c0b19 5449 BUG();
5eb08b69
ZW
5450 }
5451
dada1a9f 5452 if (is_edp(intel_dp)) {
773538e8 5453 pps_lock(intel_dp);
1e74a324
VS
5454 intel_dp_init_panel_power_timestamps(intel_dp);
5455 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5456 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5457 else
36b5f425 5458 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5459 pps_unlock(intel_dp);
dada1a9f 5460 }
0095e6dc 5461
9d1a1031 5462 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5463
0e32b39c 5464 /* init MST on ports that can support it */
c86ea3d0 5465 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5466 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5467 intel_dp_mst_encoder_init(intel_dig_port,
5468 intel_connector->base.base.id);
0e32b39c
DA
5469 }
5470 }
5471
36b5f425 5472 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5473 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5474 if (is_edp(intel_dp)) {
5475 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5476 /*
5477		 * vdd might still be enabled due to the delayed vdd off.
5478 * Make sure vdd is actually turned off here.
5479 */
773538e8 5480 pps_lock(intel_dp);
4be73780 5481 edp_panel_vdd_off_sync(intel_dp);
773538e8 5482 pps_unlock(intel_dp);
15b1d171 5483 }
34ea3d38 5484 drm_connector_unregister(connector);
b2f246a8 5485 drm_connector_cleanup(connector);
16c25533 5486 return false;
b2f246a8 5487 }
32f9d658 5488
f684960e
CW
5489 intel_dp_add_properties(intel_dp, connector);
5490
a4fc5ed6
KP
5491 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5492 * 0xd. Failure to do so will result in spurious interrupts being
5493 * generated on the port when a cable is not attached.
5494 */
5495 if (IS_G4X(dev) && !IS_GM45(dev)) {
5496 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5497 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5498 }
16c25533
PZ
5499
5500 return true;
a4fc5ed6 5501}
f0fec3f2
PZ
5502
5503void
5504intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5505{
13cf5504 5506 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5507 struct intel_digital_port *intel_dig_port;
5508 struct intel_encoder *intel_encoder;
5509 struct drm_encoder *encoder;
5510 struct intel_connector *intel_connector;
5511
b14c5679 5512 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5513 if (!intel_dig_port)
5514 return;
5515
b14c5679 5516 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5517 if (!intel_connector) {
5518 kfree(intel_dig_port);
5519 return;
5520 }
5521
5522 intel_encoder = &intel_dig_port->base;
5523 encoder = &intel_encoder->base;
5524
5525 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5526 DRM_MODE_ENCODER_TMDS);
5527
5bfe2ac0 5528 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5529 intel_encoder->disable = intel_disable_dp;
00c09d70 5530 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5531 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5532 intel_encoder->suspend = intel_dp_encoder_suspend;
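	/*
	 * CHV and VLV need PHY-specific pre-PLL/pre-enable sequences and so
	 * get their own hooks; everything else uses the g4x paths, with an
	 * ilk post_disable on gen5+.
	 */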
e4a1d846 5533 if (IS_CHERRYVIEW(dev)) {
9197c88b 5534 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5535 intel_encoder->pre_enable = chv_pre_enable_dp;
5536 intel_encoder->enable = vlv_enable_dp;
580d3811 5537 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5538 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5539 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5540 intel_encoder->pre_enable = vlv_pre_enable_dp;
5541 intel_encoder->enable = vlv_enable_dp;
49277c31 5542 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5543 } else {
ecff4f3b
JN
5544 intel_encoder->pre_enable = g4x_pre_enable_dp;
5545 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5546 if (INTEL_INFO(dev)->gen >= 5)
5547 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5548 }
f0fec3f2 5549
174edf1f 5550 intel_dig_port->port = port;
f0fec3f2
PZ
5551 intel_dig_port->dp.output_reg = output_reg;
5552
00c09d70 5553 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5554 if (IS_CHERRYVIEW(dev)) {
5555 if (port == PORT_D)
5556 intel_encoder->crtc_mask = 1 << 2;
5557 else
5558 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5559 } else {
5560 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5561 }
bc079e8b 5562 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5563 intel_encoder->hot_plug = intel_dp_hot_plug;
5564
13cf5504
DA
5565 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5566 dev_priv->hpd_irq_port[port] = intel_dig_port;
5567
15b1d171
PZ
5568 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5569 drm_encoder_cleanup(encoder);
5570 kfree(intel_dig_port);
b2f246a8 5571 kfree(intel_connector);
15b1d171 5572 }
f0fec3f2 5573}
0e32b39c
DA
5574
5575void intel_dp_mst_suspend(struct drm_device *dev)
5576{
5577 struct drm_i915_private *dev_priv = dev->dev_private;
5578 int i;
5579
5580 /* disable MST */
5581 for (i = 0; i < I915_MAX_PORTS; i++) {
5582 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5583 if (!intel_dig_port)
5584 continue;
5585
5586 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5587 if (!intel_dig_port->dp.can_mst)
5588 continue;
5589 if (intel_dig_port->dp.is_mst)
5590 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5591 }
5592 }
5593}
5594
5595void intel_dp_mst_resume(struct drm_device *dev)
5596{
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5598 int i;
5599
5600 for (i = 0; i < I915_MAX_PORTS; i++) {
5601 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5602 if (!intel_dig_port)
5603 continue;
5604 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5605 int ret;
5606
5607 if (!intel_dig_port->dp.can_mst)
5608 continue;
5609
5610 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5611 if (ret != 0) {
5612 intel_dp_check_mst_status(&intel_dig_port->dp);
5613 }
5614 }
5615 }
5616}