drm/i915: add common intel_digital_port_connected function
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below provides only the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
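/*
 * Note (illustrative): the rate values above are link clocks in kHz, i.e.
 * the raw link bit rate divided by 10, so 162000 corresponds to the
 * 1.62 GHz RBR rate, 270000 to 2.7 GHz HBR and 540000 to 5.4 GHz HBR2.
 */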

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
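/*
 * Example: a DDI port A configured without DDI_A_4_LANES reports a source
 * maximum of 2 lanes, so even a 4-lane capable sink ends up limited to
 * min(2, 4) = 2 lanes here.
 */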

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
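/*
 * Worked example for the two helpers above: a 148500 kHz mode at 24 bpp
 * needs intel_dp_link_required(148500, 24) == 356400, while two lanes of
 * HBR2 provide intel_dp_max_data_rate(540000, 2) == 864000, so the mode
 * fits (both values are in the "decakilobit" units described above).
 */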

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
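/*
 * Example: intel_dp_pack_aux({0x12, 0x34}, 2) yields 0x12340000, and
 * intel_dp_unpack_aux(0x12340000, dst, 2) recovers {0x12, 0x34} - bytes
 * are packed MSB-first into each 32-bit AUX data register.
 */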

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
   This function is only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
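/*
 * Example: with a 200 MHz hrawclk (e.g. an 800 MHz FSB) the divider is
 * 100, giving roughly the 2 MHz 2x bit clock the AUX channel wants.
 */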

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
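/*
 * The 4-byte header built in intel_dp_aux_transfer() below matches these
 * sizes: txbuf[0..2] carry the request nibble plus the 20-bit AUX address
 * (BARE_ADDRESS_SIZE), and txbuf[3] holds the transfer length minus one.
 */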
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
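/*
 * The (DP_LINK_BW_x >> 3) + 1 expressions above are entry counts into
 * default_rates: DP_LINK_BW_2_7 (0x0a) yields 2 entries {162000, 270000}
 * and DP_LINK_BW_5_4 (0x14) yields 3 entries {162000, 270000, 540000}.
 */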

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
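/*
 * Both input arrays are sorted in ascending order, so this is a simple
 * merge-style intersection. For example, intersecting default_rates
 * {162000, 270000, 540000} with a sink list of {162000, 270000} produces
 * {162000, 270000} and returns 2.
 */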

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
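/*
 * rates[] is zero-initialized and filled in ascending order, so
 * rate_to_index(0, rates) lands on the first unused slot and the entry
 * just before it is the highest common rate.
 */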

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
				  uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
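/*
 * In other words: sinks that expose a rate table (eDP 1.4 style) are
 * programmed via a rate-select index with LINK_BW set to 0, while
 * everything else gets the classic DP_LINK_BW_* code and a zero
 * rate-select.
 */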

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
1555
7c62a164 1556static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1557{
7c62a164
DV
1558 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1559 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1560 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 u32 dpa_ctl;
1563
6e3c9717
ACO
1564 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1565 crtc->config->port_clock);
ea9b6006
DV
1566 dpa_ctl = I915_READ(DP_A);
1567 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1568
6e3c9717 1569 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1570 /* For a long time we've carried around a ILK-DevA w/a for the
1571 * 160MHz clock. If we're really unlucky, it's still required.
1572 */
1573 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1574 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1575 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1576 } else {
1577 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1578 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1579 }
1ce17038 1580
ea9b6006
DV
1581 I915_WRITE(DP_A, dpa_ctl);
1582
1583 POSTING_READ(DP_A);
1584 udelay(500);
1585}
1586
901c2daf
VS
1587void intel_dp_set_link_params(struct intel_dp *intel_dp,
1588 const struct intel_crtc_state *pipe_config)
1589{
1590 intel_dp->link_rate = pipe_config->port_clock;
1591 intel_dp->lane_count = pipe_config->lane_count;
1592}
1593
8ac33ed3 1594static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1595{
b934223d 1596 struct drm_device *dev = encoder->base.dev;
417e822d 1597 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1598 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1599 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1600 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1601 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1602
901c2daf
VS
1603 intel_dp_set_link_params(intel_dp, crtc->config);
1604
417e822d 1605 /*
1a2eb460 1606 * There are four kinds of DP registers:
417e822d
KP
1607 *
1608 * IBX PCH
1a2eb460
KP
1609 * SNB CPU
1610 * IVB CPU
417e822d
KP
1611 * CPT PCH
1612 *
1613 * IBX PCH and CPU are the same for almost everything,
1614 * except that the CPU DP PLL is configured in this
1615 * register
1616 *
1617 * CPT PCH is quite different, having many bits moved
1618 * to the TRANS_DP_CTL register instead. That
1619 * configuration happens (oddly) in ironlake_pch_enable
1620 */
9c9e7927 1621
417e822d
KP
1622 /* Preserve the BIOS-computed detected bit. This is
1623 * supposed to be read-only.
1624 */
1625 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1626
417e822d 1627 /* Handle DP bits in common between all three register formats */
417e822d 1628 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1629 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1630
6e3c9717 1631 if (crtc->config->has_audio)
ea5b213a 1632 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1633
417e822d 1634 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1635
39e5fa88 1636 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1637 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1638 intel_dp->DP |= DP_SYNC_HS_HIGH;
1639 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1640 intel_dp->DP |= DP_SYNC_VS_HIGH;
1641 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1642
6aba5b6c 1643 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1644 intel_dp->DP |= DP_ENHANCED_FRAMING;
1645
7c62a164 1646 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1647 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1648 u32 trans_dp;
1649
39e5fa88 1650 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1651
1652 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1653 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1654 trans_dp |= TRANS_DP_ENH_FRAMING;
1655 else
1656 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1657 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1658 } else {
1659 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1660 crtc->config->limited_color_range)
1661 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1662
1663 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1664 intel_dp->DP |= DP_SYNC_HS_HIGH;
1665 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1666 intel_dp->DP |= DP_SYNC_VS_HIGH;
1667 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1668
6aba5b6c 1669 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1670 intel_dp->DP |= DP_ENHANCED_FRAMING;
1671
39e5fa88 1672 if (IS_CHERRYVIEW(dev))
44f37d1f 1673 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1674 else if (crtc->pipe == PIPE_B)
1675 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1676 }
1677}
1678
1679#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1680#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1681
1682#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1683#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1684
1685#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1686#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1687
4be73780 1688static void wait_panel_status(struct intel_dp *intel_dp,
1689 u32 mask,
1690 u32 value)
bd943159 1691{
30add22d 1692 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 u32 pp_stat_reg, pp_ctrl_reg;
1695
1696 lockdep_assert_held(&dev_priv->pps_mutex);
1697
1698 pp_stat_reg = _pp_stat_reg(intel_dp);
1699 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1700
99ea7127 1701 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1702 mask, value,
1703 I915_READ(pp_stat_reg),
1704 I915_READ(pp_ctrl_reg));
32ce697c 1705
453c5420 1706 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1707 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1708 I915_READ(pp_stat_reg),
1709 I915_READ(pp_ctrl_reg));
32ce697c 1710 }
1711
1712 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1713}
32ce697c 1714
4be73780 1715static void wait_panel_on(struct intel_dp *intel_dp)
1716{
1717 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1718 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1719}
1720
4be73780 1721static void wait_panel_off(struct intel_dp *intel_dp)
1722{
1723 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1724 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1725}
1726
4be73780 1727static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1728{
1729 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1730
1731	 /* When we disable the VDD override bit last, we have to do the manual
1732	  * wait. */
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1734 intel_dp->panel_power_cycle_delay);
1735
4be73780 1736 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1737}
1738
4be73780 1739static void wait_backlight_on(struct intel_dp *intel_dp)
1740{
1741 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1742 intel_dp->backlight_on_delay);
1743}
1744
4be73780 1745static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1746{
1747 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1748 intel_dp->backlight_off_delay);
1749}
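/*
 * Illustrative sketch only (not part of the driver source): the wait
 * helpers above enforce the panel power-up ordering that the enable
 * paths later in this file follow, roughly:
 *
 *	edp_panel_vdd_on(intel_dp);        // waits out a pending power cycle
 *	edp_panel_on(intel_dp);            // wait_panel_on(), records last_power_on
 *	intel_edp_backlight_on(intel_dp);  // wait_backlight_on() honours
 *	                                   // backlight_on_delay first
 */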
99ea7127 1750
1751/* Read the current pp_control value, unlocking the register if it
1752 * is locked
1753 */
1754
453c5420 1755static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1756{
1757 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1758 struct drm_i915_private *dev_priv = dev->dev_private;
1759 u32 control;
832dd3c1 1760
1761 lockdep_assert_held(&dev_priv->pps_mutex);
1762
bf13e81b 1763 control = I915_READ(_pp_ctrl_reg(intel_dp));
1764 if (!IS_BROXTON(dev)) {
1765 control &= ~PANEL_UNLOCK_MASK;
1766 control |= PANEL_UNLOCK_REGS;
1767 }
832dd3c1 1768 return control;
1769}
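/*
 * Sketch of the read-modify-write pattern built on top of
 * ironlake_get_pp_control(); this mirrors what the VDD, panel power and
 * backlight paths below do, shown here only as an illustration:
 *
 *	pp = ironlake_get_pp_control(intel_dp);
 *	pp |= EDP_FORCE_VDD;                    // or EDP_BLC_ENABLE, POWER_TARGET_ON, ...
 *	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
 *	POSTING_READ(_pp_ctrl_reg(intel_dp));
 */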
1770
1771/*
1772 * Must be paired with edp_panel_vdd_off().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1775 */
1e0560e0 1776static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1777{
30add22d 1778 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1779 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1780 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1781 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1782 enum intel_display_power_domain power_domain;
5d613501 1783 u32 pp;
453c5420 1784 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1785 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1786
1787 lockdep_assert_held(&dev_priv->pps_mutex);
1788
97af61f5 1789 if (!is_edp(intel_dp))
adddaaf4 1790 return false;
bd943159 1791
2c623c11 1792 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1793 intel_dp->want_panel_vdd = true;
99ea7127 1794
4be73780 1795 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1796 return need_to_disable;
b0665d57 1797
1798 power_domain = intel_display_port_power_domain(intel_encoder);
1799 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1800
1801 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1802 port_name(intel_dig_port->port));
bd943159 1803
1804 if (!edp_have_panel_power(intel_dp))
1805 wait_panel_power_cycle(intel_dp);
99ea7127 1806
453c5420 1807 pp = ironlake_get_pp_control(intel_dp);
5d613501 1808 pp |= EDP_FORCE_VDD;
ebf33b18 1809
1810 pp_stat_reg = _pp_stat_reg(intel_dp);
1811 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1812
1813 I915_WRITE(pp_ctrl_reg, pp);
1814 POSTING_READ(pp_ctrl_reg);
1815 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1817 /*
1818 * If the panel wasn't on, delay before accessing aux channel
1819 */
4be73780 1820 if (!edp_have_panel_power(intel_dp)) {
1821 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1822 port_name(intel_dig_port->port));
f01eca2e 1823 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1824 }
1825
1826 return need_to_disable;
1827}
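/*
 * Illustrative pairing sketch (compare intel_enable_dp() later in this
 * file); the lock and the on/off calls must bracket each other exactly
 * as the comments above describe:
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */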
1828
1829/*
1830 * Must be paired with intel_edp_panel_vdd_off() or
1831 * intel_edp_panel_off().
1832 * Nested calls to these functions are not allowed since
1833 * we drop the lock. Caller must use some higher level
1834 * locking to prevent nested calls from other threads.
1835 */
b80d6c78 1836void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1837{
c695b6b6 1838 bool vdd;
adddaaf4 1839
1840 if (!is_edp(intel_dp))
1841 return;
1842
773538e8 1843 pps_lock(intel_dp);
c695b6b6 1844 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1845 pps_unlock(intel_dp);
c695b6b6 1846
e2c719b7 1847 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1848 port_name(dp_to_dig_port(intel_dp)->port));
1849}
1850
4be73780 1851static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1852{
30add22d 1853 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1854 struct drm_i915_private *dev_priv = dev->dev_private;
1855 struct intel_digital_port *intel_dig_port =
1856 dp_to_dig_port(intel_dp);
1857 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1858 enum intel_display_power_domain power_domain;
5d613501 1859 u32 pp;
453c5420 1860 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1861
e39b999a 1862 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1863
15e899a0 1864 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1865
15e899a0 1866 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1867 return;
b0665d57 1868
1869 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1870 port_name(intel_dig_port->port));
bd943159 1871
1872 pp = ironlake_get_pp_control(intel_dp);
1873 pp &= ~EDP_FORCE_VDD;
453c5420 1874
1875 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1876 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1877
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
90791a5c 1880
1881 /* Make sure sequencer is idle before allowing subsequent activity */
1882 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1883 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1884
1885 if ((pp & POWER_TARGET_ON) == 0)
1886 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1887
1888 power_domain = intel_display_port_power_domain(intel_encoder);
1889 intel_display_power_put(dev_priv, power_domain);
bd943159 1890}
5d613501 1891
4be73780 1892static void edp_panel_vdd_work(struct work_struct *__work)
1893{
1894 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1895 struct intel_dp, panel_vdd_work);
bd943159 1896
773538e8 1897 pps_lock(intel_dp);
1898 if (!intel_dp->want_panel_vdd)
1899 edp_panel_vdd_off_sync(intel_dp);
773538e8 1900 pps_unlock(intel_dp);
1901}
1902
1903static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1904{
1905 unsigned long delay;
1906
1907 /*
1908 * Queue the timer to fire a long time from now (relative to the power
1909 * down delay) to keep the panel power up across a sequence of
1910 * operations.
1911 */
1912 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1913 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1914}
1915
1916/*
1917 * Must be paired with edp_panel_vdd_on().
1918 * Must hold pps_mutex around the whole on/off sequence.
1919 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1920 */
4be73780 1921static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1922{
1923 struct drm_i915_private *dev_priv =
1924 intel_dp_to_dev(intel_dp)->dev_private;
1925
1926 lockdep_assert_held(&dev_priv->pps_mutex);
1927
1928 if (!is_edp(intel_dp))
1929 return;
5d613501 1930
e2c719b7 1931 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1932 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1933
1934 intel_dp->want_panel_vdd = false;
1935
aba86890 1936 if (sync)
4be73780 1937 edp_panel_vdd_off_sync(intel_dp);
1938 else
1939 edp_panel_vdd_schedule_off(intel_dp);
1940}
1941
9f0fb5be 1942static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1943{
30add22d 1944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1945 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1946 u32 pp;
453c5420 1947 u32 pp_ctrl_reg;
9934c132 1948
1949 lockdep_assert_held(&dev_priv->pps_mutex);
1950
97af61f5 1951 if (!is_edp(intel_dp))
bd943159 1952 return;
99ea7127 1953
1954 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1955 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1956
1957 if (WARN(edp_have_panel_power(intel_dp),
1958 "eDP port %c panel power already on\n",
1959 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1960 return;
9934c132 1961
4be73780 1962 wait_panel_power_cycle(intel_dp);
37c6c9b0 1963
bf13e81b 1964 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1965 pp = ironlake_get_pp_control(intel_dp);
1966 if (IS_GEN5(dev)) {
1967 /* ILK workaround: disable reset around power sequence */
1968 pp &= ~PANEL_POWER_RESET;
1969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
05ce1a49 1971 }
37c6c9b0 1972
1c0ae80a 1973 pp |= POWER_TARGET_ON;
1974 if (!IS_GEN5(dev))
1975 pp |= PANEL_POWER_RESET;
1976
1977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
9934c132 1979
4be73780 1980 wait_panel_on(intel_dp);
dce56b3c 1981 intel_dp->last_power_on = jiffies;
9934c132 1982
1983 if (IS_GEN5(dev)) {
1984 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1985 I915_WRITE(pp_ctrl_reg, pp);
1986 POSTING_READ(pp_ctrl_reg);
05ce1a49 1987 }
9f0fb5be 1988}
e39b999a 1989
1990void intel_edp_panel_on(struct intel_dp *intel_dp)
1991{
1992 if (!is_edp(intel_dp))
1993 return;
1994
1995 pps_lock(intel_dp);
1996 edp_panel_on(intel_dp);
773538e8 1997 pps_unlock(intel_dp);
1998}
1999
2000
2001static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2002{
2003 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2004 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2006 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2007 enum intel_display_power_domain power_domain;
99ea7127 2008 u32 pp;
453c5420 2009 u32 pp_ctrl_reg;
9934c132 2010
2011 lockdep_assert_held(&dev_priv->pps_mutex);
2012
2013 if (!is_edp(intel_dp))
2014 return;
37c6c9b0 2015
2016 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2017 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2018
2019 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2020 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2021
453c5420 2022 pp = ironlake_get_pp_control(intel_dp);
2023	 /* We need to switch off panel power _and_ force vdd, because otherwise some
2024	  * panels get very unhappy and cease to work. */
2025 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2026 EDP_BLC_ENABLE);
453c5420 2027
bf13e81b 2028 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2029
2030 intel_dp->want_panel_vdd = false;
2031
2032 I915_WRITE(pp_ctrl_reg, pp);
2033 POSTING_READ(pp_ctrl_reg);
9934c132 2034
dce56b3c 2035 intel_dp->last_power_cycle = jiffies;
4be73780 2036 wait_panel_off(intel_dp);
2037
2038 /* We got a reference when we enabled the VDD. */
2039 power_domain = intel_display_port_power_domain(intel_encoder);
2040 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2041}
e39b999a 2042
2043void intel_edp_panel_off(struct intel_dp *intel_dp)
2044{
2045 if (!is_edp(intel_dp))
2046 return;
e39b999a 2047
2048 pps_lock(intel_dp);
2049 edp_panel_off(intel_dp);
773538e8 2050 pps_unlock(intel_dp);
2051}
2052
2053/* Enable backlight in the panel power control. */
2054static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2055{
2056 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2057 struct drm_device *dev = intel_dig_port->base.base.dev;
2058 struct drm_i915_private *dev_priv = dev->dev_private;
2059 u32 pp;
453c5420 2060 u32 pp_ctrl_reg;
32f9d658 2061
2062 /*
2063 * If we enable the backlight right away following a panel power
2064 * on, we may see slight flicker as the panel syncs with the eDP
2065 * link. So delay a bit to make sure the image is solid before
2066 * allowing it to appear.
2067 */
4be73780 2068 wait_backlight_on(intel_dp);
e39b999a 2069
773538e8 2070 pps_lock(intel_dp);
e39b999a 2071
453c5420 2072 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2073 pp |= EDP_BLC_ENABLE;
453c5420 2074
bf13e81b 2075 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2076
2077 I915_WRITE(pp_ctrl_reg, pp);
2078 POSTING_READ(pp_ctrl_reg);
e39b999a 2079
773538e8 2080 pps_unlock(intel_dp);
2081}
2082
2083/* Enable backlight PWM and backlight PP control. */
2084void intel_edp_backlight_on(struct intel_dp *intel_dp)
2085{
2086 if (!is_edp(intel_dp))
2087 return;
2088
2089 DRM_DEBUG_KMS("\n");
2090
2091 intel_panel_enable_backlight(intel_dp->attached_connector);
2092 _intel_edp_backlight_on(intel_dp);
2093}
2094
2095/* Disable backlight in the panel power control. */
2096static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2097{
30add22d 2098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 u32 pp;
453c5420 2101 u32 pp_ctrl_reg;
32f9d658 2102
2103 if (!is_edp(intel_dp))
2104 return;
2105
773538e8 2106 pps_lock(intel_dp);
e39b999a 2107
453c5420 2108 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2109 pp &= ~EDP_BLC_ENABLE;
453c5420 2110
bf13e81b 2111 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2112
2113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
f7d2323c 2115
773538e8 2116 pps_unlock(intel_dp);
2117
2118 intel_dp->last_backlight_off = jiffies;
f7d2323c 2119 edp_wait_backlight_off(intel_dp);
1250d107 2120}
f7d2323c 2121
2122/* Disable backlight PP control and backlight PWM. */
2123void intel_edp_backlight_off(struct intel_dp *intel_dp)
2124{
2125 if (!is_edp(intel_dp))
2126 return;
2127
2128 DRM_DEBUG_KMS("\n");
f7d2323c 2129
1250d107 2130 _intel_edp_backlight_off(intel_dp);
f7d2323c 2131 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2132}
a4fc5ed6 2133
2134/*
2135 * Hook for controlling the panel power control backlight through the bl_power
2136 * sysfs attribute. Take care to handle multiple calls.
2137 */
2138static void intel_edp_backlight_power(struct intel_connector *connector,
2139 bool enable)
2140{
2141 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2142 bool is_enabled;
2143
773538e8 2144 pps_lock(intel_dp);
e39b999a 2145 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2146 pps_unlock(intel_dp);
2147
2148 if (is_enabled == enable)
2149 return;
2150
2151 DRM_DEBUG_KMS("panel power control backlight %s\n",
2152 enable ? "enable" : "disable");
2153
2154 if (enable)
2155 _intel_edp_backlight_on(intel_dp);
2156 else
2157 _intel_edp_backlight_off(intel_dp);
2158}
2159
2bd2ad64 2160static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2161{
2162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2163 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2164 struct drm_device *dev = crtc->dev;
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 dpa_ctl;
2167
2168 assert_pipe_disabled(dev_priv,
2169 to_intel_crtc(crtc)->pipe);
2170
2171 DRM_DEBUG_KMS("\n");
2172 dpa_ctl = I915_READ(DP_A);
2173 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2174 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2175
2176 /* We don't adjust intel_dp->DP while tearing down the link, to
2177 * facilitate link retraining (e.g. after hotplug). Hence clear all
2178 * enable bits here to ensure that we don't enable too much. */
2179 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2180 intel_dp->DP |= DP_PLL_ENABLE;
2181 I915_WRITE(DP_A, intel_dp->DP);
2182 POSTING_READ(DP_A);
2183 udelay(200);
2184}
2185
2bd2ad64 2186static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2187{
2188 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2189 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2190 struct drm_device *dev = crtc->dev;
2191 struct drm_i915_private *dev_priv = dev->dev_private;
2192 u32 dpa_ctl;
2193
2194 assert_pipe_disabled(dev_priv,
2195 to_intel_crtc(crtc)->pipe);
2196
d240f20f 2197 dpa_ctl = I915_READ(DP_A);
2198 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2199 "dp pll off, should be on\n");
2200 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2201
2202	 /* We can't rely on the value tracked for the DP register in
2203	  * intel_dp->DP because link_down must not change that (otherwise link
2204	  * re-training will fail). */
298b0b39 2205 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2206 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2207 POSTING_READ(DP_A);
2208 udelay(200);
2209}
2210
c7ad3810 2211/* If the sink supports it, try to set the power state appropriately */
c19b0669 2212void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2213{
2214 int ret, i;
2215
2216 /* Should have a valid DPCD by this point */
2217 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2218 return;
2219
2220 if (mode != DRM_MODE_DPMS_ON) {
2221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D3);
2223 } else {
2224 /*
2225 * When turning on, we need to retry for 1ms to give the sink
2226 * time to wake up.
2227 */
2228 for (i = 0; i < 3; i++) {
2229 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2230 DP_SET_POWER_D0);
2231 if (ret == 1)
2232 break;
2233 msleep(1);
2234 }
2235 }
2236
2237 if (ret != 1)
2238 DRM_DEBUG_KMS("failed to %s sink power state\n",
2239 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2240}
2241
2242static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2243 enum pipe *pipe)
d240f20f 2244{
19d8fe15 2245 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2246 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2247 struct drm_device *dev = encoder->base.dev;
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2249 enum intel_display_power_domain power_domain;
2250 u32 tmp;
2251
2252 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2253 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2254 return false;
2255
2256 tmp = I915_READ(intel_dp->output_reg);
2257
2258 if (!(tmp & DP_PORT_EN))
2259 return false;
2260
39e5fa88 2261 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2262 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2263 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2264 enum pipe p;
19d8fe15 2265
2266 for_each_pipe(dev_priv, p) {
2267 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2268 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2269 *pipe = p;
2270 return true;
2271 }
2272 }
19d8fe15 2273
2274 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2275 intel_dp->output_reg);
2276 } else if (IS_CHERRYVIEW(dev)) {
2277 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2278 } else {
2279 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2280 }
d240f20f 2281
2282 return true;
2283}
d240f20f 2284
045ac3b5 2285static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2286 struct intel_crtc_state *pipe_config)
2287{
2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2289 u32 tmp, flags = 0;
2290 struct drm_device *dev = encoder->base.dev;
2291 struct drm_i915_private *dev_priv = dev->dev_private;
2292 enum port port = dp_to_dig_port(intel_dp)->port;
2293 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2294 int dotclock;
045ac3b5 2295
9ed109a7 2296 tmp = I915_READ(intel_dp->output_reg);
2297
2298 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2299
39e5fa88 2300 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2301 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2302
2303 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2304 flags |= DRM_MODE_FLAG_PHSYNC;
2305 else
2306 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2307
b81e34c2 2308 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2309 flags |= DRM_MODE_FLAG_PVSYNC;
2310 else
2311 flags |= DRM_MODE_FLAG_NVSYNC;
2312 } else {
39e5fa88 2313 if (tmp & DP_SYNC_HS_HIGH)
2314 flags |= DRM_MODE_FLAG_PHSYNC;
2315 else
2316 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2317
39e5fa88 2318 if (tmp & DP_SYNC_VS_HIGH)
2319 flags |= DRM_MODE_FLAG_PVSYNC;
2320 else
2321 flags |= DRM_MODE_FLAG_NVSYNC;
2322 }
045ac3b5 2323
2d112de7 2324 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2325
2326 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2327 tmp & DP_COLOR_RANGE_16_235)
2328 pipe_config->limited_color_range = true;
2329
2330 pipe_config->has_dp_encoder = true;
2331
2332 pipe_config->lane_count =
2333 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2334
2335 intel_dp_get_m_n(crtc, pipe_config);
2336
18442d08 2337 if (port == PORT_A) {
2338 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2339 pipe_config->port_clock = 162000;
2340 else
2341 pipe_config->port_clock = 270000;
2342 }
2343
2344 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2345 &pipe_config->dp_m_n);
2346
2347 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2348 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2349
2d112de7 2350 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2351
2352 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2353 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2354 /*
2355 * This is a big fat ugly hack.
2356 *
2357 * Some machines in UEFI boot mode provide us a VBT that has 18
2358 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2359 * unknown we fail to light up. Yet the same BIOS boots up with
2360 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2361 * max, not what it tells us to use.
2362 *
2363 * Note: This will still be broken if the eDP panel is not lit
2364 * up by the BIOS, and thus we can't get the mode at module
2365 * load.
2366 */
2367 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2368 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2369 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2370 }
2371}
2372
e8cb4558 2373static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2374{
e8cb4558 2375 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2376 struct drm_device *dev = encoder->base.dev;
2377 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2378
6e3c9717 2379 if (crtc->config->has_audio)
495a5bb8 2380 intel_audio_codec_disable(encoder);
6cb49835 2381
2382 if (HAS_PSR(dev) && !HAS_DDI(dev))
2383 intel_psr_disable(intel_dp);
2384
2385 /* Make sure the panel is off before trying to change the mode. But also
2386 * ensure that we have vdd while we switch off the panel. */
24f3e092 2387 intel_edp_panel_vdd_on(intel_dp);
4be73780 2388 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2389 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2390 intel_edp_panel_off(intel_dp);
3739850b 2391
2392 /* disable the port before the pipe on g4x */
2393 if (INTEL_INFO(dev)->gen < 5)
3739850b 2394 intel_dp_link_down(intel_dp);
2395}
2396
08aff3fe 2397static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2398{
2bd2ad64 2399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2400 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2401
49277c31 2402 intel_dp_link_down(intel_dp);
2403 if (port == PORT_A)
2404 ironlake_edp_pll_off(intel_dp);
2405}
2406
2407static void vlv_post_disable_dp(struct intel_encoder *encoder)
2408{
2409 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2410
2411 intel_dp_link_down(intel_dp);
2412}
2413
2414static void chv_post_disable_dp(struct intel_encoder *encoder)
2415{
2416 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2417 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2418 struct drm_device *dev = encoder->base.dev;
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420 struct intel_crtc *intel_crtc =
2421 to_intel_crtc(encoder->base.crtc);
2422 enum dpio_channel ch = vlv_dport_to_channel(dport);
2423 enum pipe pipe = intel_crtc->pipe;
2424 u32 val;
2425
2426 intel_dp_link_down(intel_dp);
2427
a580516d 2428 mutex_lock(&dev_priv->sb_lock);
2429
2430 /* Propagate soft reset to data lane reset */
97fd4d5c 2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2432 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2434
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2436 val |= CHV_PCS_REQ_SOFTRESET_EN;
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2438
2439 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2440 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2441 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2442
2443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2444 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2446
a580516d 2447 mutex_unlock(&dev_priv->sb_lock);
2448}
2449
2450static void
2451_intel_dp_set_link_train(struct intel_dp *intel_dp,
2452 uint32_t *DP,
2453 uint8_t dp_train_pat)
2454{
2455 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2456 struct drm_device *dev = intel_dig_port->base.base.dev;
2457 struct drm_i915_private *dev_priv = dev->dev_private;
2458 enum port port = intel_dig_port->port;
2459
2460 if (HAS_DDI(dev)) {
2461 uint32_t temp = I915_READ(DP_TP_CTL(port));
2462
2463 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2464 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2465 else
2466 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2467
2468 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2469 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2470 case DP_TRAINING_PATTERN_DISABLE:
2471 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2472
2473 break;
2474 case DP_TRAINING_PATTERN_1:
2475 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2476 break;
2477 case DP_TRAINING_PATTERN_2:
2478 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2479 break;
2480 case DP_TRAINING_PATTERN_3:
2481 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2482 break;
2483 }
2484 I915_WRITE(DP_TP_CTL(port), temp);
2485
2486 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2487 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2488 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2489
2490 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2491 case DP_TRAINING_PATTERN_DISABLE:
2492 *DP |= DP_LINK_TRAIN_OFF_CPT;
2493 break;
2494 case DP_TRAINING_PATTERN_1:
2495 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2496 break;
2497 case DP_TRAINING_PATTERN_2:
2498 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2499 break;
2500 case DP_TRAINING_PATTERN_3:
2501 DRM_ERROR("DP training pattern 3 not supported\n");
2502 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2503 break;
2504 }
2505
2506 } else {
2507 if (IS_CHERRYVIEW(dev))
2508 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2509 else
2510 *DP &= ~DP_LINK_TRAIN_MASK;
2511
2512 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2513 case DP_TRAINING_PATTERN_DISABLE:
2514 *DP |= DP_LINK_TRAIN_OFF;
2515 break;
2516 case DP_TRAINING_PATTERN_1:
2517 *DP |= DP_LINK_TRAIN_PAT_1;
2518 break;
2519 case DP_TRAINING_PATTERN_2:
2520 *DP |= DP_LINK_TRAIN_PAT_2;
2521 break;
2522 case DP_TRAINING_PATTERN_3:
2523 if (IS_CHERRYVIEW(dev)) {
2524 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2525 } else {
2526 DRM_ERROR("DP training pattern 3 not supported\n");
2527 *DP |= DP_LINK_TRAIN_PAT_2;
2528 }
2529 break;
2530 }
2531 }
2532}
2533
2534static void intel_dp_enable_port(struct intel_dp *intel_dp)
2535{
2536 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2537 struct drm_i915_private *dev_priv = dev->dev_private;
2538
2539 /* enable with pattern 1 (as per spec) */
2540 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2541 DP_TRAINING_PATTERN_1);
2542
2543 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2544 POSTING_READ(intel_dp->output_reg);
2545
2546 /*
2547 * Magic for VLV/CHV. We _must_ first set up the register
2548 * without actually enabling the port, and then do another
2549 * write to enable the port. Otherwise link training will
2550 * fail when the power sequencer is freshly used for this port.
2551 */
2552 intel_dp->DP |= DP_PORT_EN;
2553
2554 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2555 POSTING_READ(intel_dp->output_reg);
2556}
2557
e8cb4558 2558static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2559{
2560 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2561 struct drm_device *dev = encoder->base.dev;
2562 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2563 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2564 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2565 unsigned int lane_mask = 0x0;
5d613501 2566
2567 if (WARN_ON(dp_reg & DP_PORT_EN))
2568 return;
5d613501 2569
2570 pps_lock(intel_dp);
2571
2572 if (IS_VALLEYVIEW(dev))
2573 vlv_init_panel_power_sequencer(intel_dp);
2574
7b13b58a 2575 intel_dp_enable_port(intel_dp);
2576
2577 edp_panel_vdd_on(intel_dp);
2578 edp_panel_on(intel_dp);
2579 edp_panel_vdd_off(intel_dp, true);
2580
2581 pps_unlock(intel_dp);
2582
61234fa5 2583 if (IS_VALLEYVIEW(dev))
2584 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2585 lane_mask);
61234fa5 2586
f01eca2e 2587 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2588 intel_dp_start_link_train(intel_dp);
33a34e4e 2589 intel_dp_complete_link_train(intel_dp);
3ab9c637 2590 intel_dp_stop_link_train(intel_dp);
c1dec79a 2591
6e3c9717 2592 if (crtc->config->has_audio) {
2593 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2594 pipe_name(crtc->pipe));
2595 intel_audio_codec_enable(encoder);
2596 }
ab1f90f9 2597}
89b667f8 2598
2599static void g4x_enable_dp(struct intel_encoder *encoder)
2600{
2601 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2602
ecff4f3b 2603 intel_enable_dp(encoder);
4be73780 2604 intel_edp_backlight_on(intel_dp);
ab1f90f9 2605}
89b667f8 2606
2607static void vlv_enable_dp(struct intel_encoder *encoder)
2608{
2609 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2610
4be73780 2611 intel_edp_backlight_on(intel_dp);
b32c6f48 2612 intel_psr_enable(intel_dp);
2613}
2614
ecff4f3b 2615static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2616{
2617 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2618 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2619
2620 intel_dp_prepare(encoder);
2621
2622 /* Only ilk+ has port A */
2623 if (dport->port == PORT_A) {
2624 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2625 ironlake_edp_pll_on(intel_dp);
d41f1efb 2626 }
2627}
2628
2629static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2630{
2631 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2632 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2633 enum pipe pipe = intel_dp->pps_pipe;
2634 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2635
2636 edp_panel_vdd_off_sync(intel_dp);
2637
2638 /*
2639	 * VLV seems to get confused when multiple power sequencers
2640	 * have the same port selected (even if only one has power/vdd
2641	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2642	 * CHV, on the other hand, doesn't seem to mind having the same port
2643	 * selected in multiple power sequencers, but let's always clear the
2644	 * port select when logically disconnecting a power sequencer
2645 * from a port.
2646 */
2647 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2648 pipe_name(pipe), port_name(intel_dig_port->port));
2649 I915_WRITE(pp_on_reg, 0);
2650 POSTING_READ(pp_on_reg);
2651
2652 intel_dp->pps_pipe = INVALID_PIPE;
2653}
2654
2655static void vlv_steal_power_sequencer(struct drm_device *dev,
2656 enum pipe pipe)
2657{
2658 struct drm_i915_private *dev_priv = dev->dev_private;
2659 struct intel_encoder *encoder;
2660
2661 lockdep_assert_held(&dev_priv->pps_mutex);
2662
2663 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2664 return;
2665
2666 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2667 base.head) {
2668 struct intel_dp *intel_dp;
773538e8 2669 enum port port;
2670
2671 if (encoder->type != INTEL_OUTPUT_EDP)
2672 continue;
2673
2674 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2675 port = dp_to_dig_port(intel_dp)->port;
2676
2677 if (intel_dp->pps_pipe != pipe)
2678 continue;
2679
2680 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2681 pipe_name(pipe), port_name(port));
a4a5d2f8 2682
e02f9a06 2683 WARN(encoder->base.crtc,
2684 "stealing pipe %c power sequencer from active eDP port %c\n",
2685 pipe_name(pipe), port_name(port));
a4a5d2f8 2686
a4a5d2f8 2687 /* make sure vdd is off before we steal it */
83b84597 2688 vlv_detach_power_sequencer(intel_dp);
2689 }
2690}
2691
2692static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2693{
2694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2695 struct intel_encoder *encoder = &intel_dig_port->base;
2696 struct drm_device *dev = encoder->base.dev;
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2699
2700 lockdep_assert_held(&dev_priv->pps_mutex);
2701
2702 if (!is_edp(intel_dp))
2703 return;
2704
2705 if (intel_dp->pps_pipe == crtc->pipe)
2706 return;
2707
2708 /*
2709 * If another power sequencer was being used on this
2710 * port previously make sure to turn off vdd there while
2711 * we still have control of it.
2712 */
2713 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2714 vlv_detach_power_sequencer(intel_dp);
2715
2716 /*
2717 * We may be stealing the power
2718 * sequencer from another port.
2719 */
2720 vlv_steal_power_sequencer(dev, crtc->pipe);
2721
2722 /* now it's all ours */
2723 intel_dp->pps_pipe = crtc->pipe;
2724
2725 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2726 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2727
2728 /* init power sequencer on this pipe and port */
2729 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2730 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2731}
2732
ab1f90f9 2733static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2734{
2bd2ad64 2735 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2736 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2737 struct drm_device *dev = encoder->base.dev;
89b667f8 2738 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2739 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2740 enum dpio_channel port = vlv_dport_to_channel(dport);
2741 int pipe = intel_crtc->pipe;
2742 u32 val;
a4fc5ed6 2743
a580516d 2744 mutex_lock(&dev_priv->sb_lock);
89b667f8 2745
ab3c759a 2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2747 val = 0;
2748 if (pipe)
2749 val |= (1<<21);
2750 else
2751 val &= ~(1<<21);
2752 val |= 0x001000c4;
2753 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2755 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2756
a580516d 2757 mutex_unlock(&dev_priv->sb_lock);
2758
2759 intel_enable_dp(encoder);
89b667f8
JB
2760}
2761
ecff4f3b 2762static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2763{
2764 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2765 struct drm_device *dev = encoder->base.dev;
2766 struct drm_i915_private *dev_priv = dev->dev_private;
2767 struct intel_crtc *intel_crtc =
2768 to_intel_crtc(encoder->base.crtc);
e4607fcf 2769 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2770 int pipe = intel_crtc->pipe;
89b667f8 2771
2772 intel_dp_prepare(encoder);
2773
89b667f8 2774 /* Program Tx lane resets to default */
a580516d 2775 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2776 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2777 DPIO_PCS_TX_LANE2_RESET |
2778 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2779 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2780 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2781 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2782 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2783 DPIO_PCS_CLK_SOFT_RESET);
2784
2785 /* Fix up inter-pair skew failure */
2786 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2787 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2788 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2789 mutex_unlock(&dev_priv->sb_lock);
2790}
2791
2792static void chv_pre_enable_dp(struct intel_encoder *encoder)
2793{
2794 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2795 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2796 struct drm_device *dev = encoder->base.dev;
2797 struct drm_i915_private *dev_priv = dev->dev_private;
2798 struct intel_crtc *intel_crtc =
2799 to_intel_crtc(encoder->base.crtc);
2800 enum dpio_channel ch = vlv_dport_to_channel(dport);
2801 int pipe = intel_crtc->pipe;
2e523e98 2802 int data, i, stagger;
949c1d43 2803 u32 val;
e4a1d846 2804
a580516d 2805 mutex_lock(&dev_priv->sb_lock);
949c1d43 2806
2807 /* allow hardware to manage TX FIFO reset source */
2808 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2809 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2810 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2811
2812 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2813 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2814 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2815
949c1d43 2816 /* Deassert soft data lane reset*/
97fd4d5c 2817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2818 val |= CHV_PCS_REQ_SOFTRESET_EN;
2819 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2820
2821 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2822 val |= CHV_PCS_REQ_SOFTRESET_EN;
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2826 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2828
97fd4d5c 2829 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2830 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2831 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2832
2833 /* Program Tx lane latency optimal setting*/
e4a1d846 2834 for (i = 0; i < 4; i++) {
2835 /* Set the upar bit */
2836 data = (i == 1) ? 0x0 : 0x1;
2837 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2838 data << DPIO_UPAR_SHIFT);
2839 }
2840
2841 /* Data lane stagger programming */
2842 if (intel_crtc->config->port_clock > 270000)
2843 stagger = 0x18;
2844 else if (intel_crtc->config->port_clock > 135000)
2845 stagger = 0xd;
2846 else if (intel_crtc->config->port_clock > 67500)
2847 stagger = 0x7;
2848 else if (intel_crtc->config->port_clock > 33750)
2849 stagger = 0x4;
2850 else
2851 stagger = 0x2;
2852
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2854 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2856
2857 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2858 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2860
2861 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2862 DPIO_LANESTAGGER_STRAP(stagger) |
2863 DPIO_LANESTAGGER_STRAP_OVRD |
2864 DPIO_TX1_STAGGER_MASK(0x1f) |
2865 DPIO_TX1_STAGGER_MULT(6) |
2866 DPIO_TX2_STAGGER_MULT(0));
2867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(7) |
2873 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2874
a580516d 2875 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2876
e4a1d846 2877 intel_enable_dp(encoder);
2878}
2879
2880static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2881{
2882 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2883 struct drm_device *dev = encoder->base.dev;
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 struct intel_crtc *intel_crtc =
2886 to_intel_crtc(encoder->base.crtc);
2887 enum dpio_channel ch = vlv_dport_to_channel(dport);
2888 enum pipe pipe = intel_crtc->pipe;
2889 u32 val;
2890
2891 intel_dp_prepare(encoder);
2892
a580516d 2893 mutex_lock(&dev_priv->sb_lock);
9197c88b 2894
2895 /* program left/right clock distribution */
2896 if (pipe != PIPE_B) {
2897 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2898 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2899 if (ch == DPIO_CH0)
2900 val |= CHV_BUFLEFTENA1_FORCE;
2901 if (ch == DPIO_CH1)
2902 val |= CHV_BUFRIGHTENA1_FORCE;
2903 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2904 } else {
2905 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2906 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2907 if (ch == DPIO_CH0)
2908 val |= CHV_BUFLEFTENA2_FORCE;
2909 if (ch == DPIO_CH1)
2910 val |= CHV_BUFRIGHTENA2_FORCE;
2911 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2912 }
2913
2914 /* program clock channel usage */
2915 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2916 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2917 if (pipe != PIPE_B)
2918 val &= ~CHV_PCS_USEDCLKCHANNEL;
2919 else
2920 val |= CHV_PCS_USEDCLKCHANNEL;
2921 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2922
2923 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2924 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2925 if (pipe != PIPE_B)
2926 val &= ~CHV_PCS_USEDCLKCHANNEL;
2927 else
2928 val |= CHV_PCS_USEDCLKCHANNEL;
2929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2930
2931 /*
2932	 * This is a bit weird since generally CL
2933 * matches the pipe, but here we need to
2934 * pick the CL based on the port.
2935 */
2936 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2937 if (pipe != PIPE_B)
2938 val &= ~CHV_CMN_USEDCLKCHANNEL;
2939 else
2940 val |= CHV_CMN_USEDCLKCHANNEL;
2941 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2942
a580516d 2943 mutex_unlock(&dev_priv->sb_lock);
2944}
2945
2946static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2947{
2948 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2949 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2950 u32 val;
2951
2952 mutex_lock(&dev_priv->sb_lock);
2953
2954 /* disable left/right clock distribution */
2955 if (pipe != PIPE_B) {
2956 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2957 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2958 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2959 } else {
2960 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2961 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2962 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2963 }
2964
2965 mutex_unlock(&dev_priv->sb_lock);
2966}
2967
a4fc5ed6 2968/*
2969 * Native read with retry for link status and receiver capability reads for
2970 * cases where the sink may still be asleep.
2971 *
2972 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2973 * supposed to retry 3 times per the spec.
a4fc5ed6 2974 */
2975static ssize_t
2976intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2977 void *buffer, size_t size)
a4fc5ed6 2978{
2979 ssize_t ret;
2980 int i;
61da5fab 2981
2982 /*
2983	 * Sometimes we just get the same incorrect byte repeated
2984	 * over the entire buffer. Doing just one throw-away read
2985 * initially seems to "solve" it.
2986 */
2987 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2988
61da5fab 2989 for (i = 0; i < 3; i++) {
2990 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2991 if (ret == size)
2992 return ret;
2993 msleep(1);
2994 }
a4fc5ed6 2995
9d1a1031 2996 return ret;
2997}
2998
2999/*
3000 * Fetch AUX CH registers 0x202 - 0x207 which contain
3001 * link status information
3002 */
3003static bool
93f62dad 3004intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3005{
3006 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3007 DP_LANE0_1_STATUS,
3008 link_status,
3009 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3010}
3011
1100244e 3012/* These are source-specific values. */
a4fc5ed6 3013static uint8_t
1a2eb460 3014intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3015{
30add22d 3016 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3017 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3018 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3019
3020 if (IS_BROXTON(dev))
3021 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3022 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3023 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3024 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3025 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3026 } else if (IS_VALLEYVIEW(dev))
bd60018a 3027 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3028 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3029 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3030 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3031 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3032 else
bd60018a 3033 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3034}
3035
3036static uint8_t
3037intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3038{
30add22d 3039 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3040 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3041
3042 if (INTEL_INFO(dev)->gen >= 9) {
3043 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3045 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3047 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3049 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3052 default:
3053 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3054 }
3055 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3056 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3064 default:
bd60018a 3065 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3066 }
3067 } else if (IS_VALLEYVIEW(dev)) {
3068 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3070 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3071 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3072 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3074 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3076 default:
bd60018a 3077 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3078 }
bc7d38a4 3079 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3080 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3082 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3084 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3085 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3086 default:
bd60018a 3087 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3088 }
3089 } else {
3090 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3094 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3096 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3098 default:
bd60018a 3099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3100 }
3101 }
3102}
3103
5829975c 3104static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3105{
3106 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3107 struct drm_i915_private *dev_priv = dev->dev_private;
3108 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3109 struct intel_crtc *intel_crtc =
3110 to_intel_crtc(dport->base.base.crtc);
3111 unsigned long demph_reg_value, preemph_reg_value,
3112 uniqtranscale_reg_value;
3113 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3114 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3115 int pipe = intel_crtc->pipe;
3116
3117 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3118 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3119 preemph_reg_value = 0x0004000;
3120 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122 demph_reg_value = 0x2B405555;
3123 uniqtranscale_reg_value = 0x552AB83A;
3124 break;
bd60018a 3125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3126 demph_reg_value = 0x2B404040;
3127 uniqtranscale_reg_value = 0x5548B83A;
3128 break;
bd60018a 3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3130 demph_reg_value = 0x2B245555;
3131 uniqtranscale_reg_value = 0x5560B83A;
3132 break;
bd60018a 3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3134 demph_reg_value = 0x2B405555;
3135 uniqtranscale_reg_value = 0x5598DA3A;
3136 break;
3137 default:
3138 return 0;
3139 }
3140 break;
bd60018a 3141 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3142 preemph_reg_value = 0x0002000;
3143 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3145 demph_reg_value = 0x2B404040;
3146 uniqtranscale_reg_value = 0x5552B83A;
3147 break;
bd60018a 3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3149 demph_reg_value = 0x2B404848;
3150 uniqtranscale_reg_value = 0x5580B83A;
3151 break;
bd60018a 3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3153 demph_reg_value = 0x2B404040;
3154 uniqtranscale_reg_value = 0x55ADDA3A;
3155 break;
3156 default:
3157 return 0;
3158 }
3159 break;
bd60018a 3160 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3161 preemph_reg_value = 0x0000000;
3162 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3164 demph_reg_value = 0x2B305555;
3165 uniqtranscale_reg_value = 0x5570B83A;
3166 break;
bd60018a 3167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3168 demph_reg_value = 0x2B2B4040;
3169 uniqtranscale_reg_value = 0x55ADDA3A;
3170 break;
3171 default:
3172 return 0;
3173 }
3174 break;
bd60018a 3175 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3176 preemph_reg_value = 0x0006000;
3177 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179 demph_reg_value = 0x1B405555;
3180 uniqtranscale_reg_value = 0x55ADDA3A;
3181 break;
3182 default:
3183 return 0;
3184 }
3185 break;
3186 default:
3187 return 0;
3188 }
3189
a580516d 3190 mutex_lock(&dev_priv->sb_lock);
3191 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3192 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3193 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3194 uniqtranscale_reg_value);
3195 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3196 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3197 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3198 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3199 mutex_unlock(&dev_priv->sb_lock);
3200
3201 return 0;
3202}
3203
3204static bool chv_need_uniq_trans_scale(uint8_t train_set)
3205{
3206 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3207 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3208}
3209
5829975c 3210static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3211{
3212 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3213 struct drm_i915_private *dev_priv = dev->dev_private;
3214 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3215 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3216 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3217 uint8_t train_set = intel_dp->train_set[0];
3218 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3219 enum pipe pipe = intel_crtc->pipe;
3220 int i;
e4a1d846
CML
3221
3222 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3223 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3224 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3226 deemph_reg_value = 128;
3227 margin_reg_value = 52;
3228 break;
bd60018a 3229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3230 deemph_reg_value = 128;
3231 margin_reg_value = 77;
3232 break;
bd60018a 3233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3234 deemph_reg_value = 128;
3235 margin_reg_value = 102;
3236 break;
bd60018a 3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3238 deemph_reg_value = 128;
3239 margin_reg_value = 154;
3240 /* FIXME: something extra needs to be set for the 1200 mV swing level */
3241 break;
3242 default:
3243 return 0;
3244 }
3245 break;
bd60018a 3246 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3247 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3249 deemph_reg_value = 85;
3250 margin_reg_value = 78;
3251 break;
bd60018a 3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3253 deemph_reg_value = 85;
3254 margin_reg_value = 116;
3255 break;
bd60018a 3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3257 deemph_reg_value = 85;
3258 margin_reg_value = 154;
3259 break;
3260 default:
3261 return 0;
3262 }
3263 break;
bd60018a 3264 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3265 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3267 deemph_reg_value = 64;
3268 margin_reg_value = 104;
3269 break;
bd60018a 3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3271 deemph_reg_value = 64;
3272 margin_reg_value = 154;
3273 break;
3274 default:
3275 return 0;
3276 }
3277 break;
bd60018a 3278 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3279 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3281 deemph_reg_value = 43;
3282 margin_reg_value = 154;
3283 break;
3284 default:
3285 return 0;
3286 }
3287 break;
3288 default:
3289 return 0;
3290 }
3291
a580516d 3292 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3293
3294 /* Clear calc init */
1966e59e
VS
3295 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3296 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3297 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3298 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3300
3301 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3302 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3303 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3304 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3305 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3306
a02ef3c7
VS
3307 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3308 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3309 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3310 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3311
3312 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3313 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3314 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3316
e4a1d846 3317 /* Program swing deemph */
f72df8db
VS
3318 for (i = 0; i < 4; i++) {
3319 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3320 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3321 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3322 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3323 }
e4a1d846
CML
3324
3325 /* Program swing margin */
f72df8db
VS
3326 for (i = 0; i < 4; i++) {
3327 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3328
1fb44505
VS
3329 val &= ~DPIO_SWING_MARGIN000_MASK;
3330 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3331
3332 /*
3333 * Supposedly this value shouldn't matter when unique transition
3334 * scale is disabled, but in fact it does matter. Let's just
3335 * always program the same value and hope it's OK.
3336 */
3337 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3338 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3339
f72df8db
VS
3340 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3341 }
e4a1d846 3342
67fa24b4
VS
3343 /*
3344 * The documentation says bit 27 needs to be set for ch0 and bit 26
3345 * for ch1, which might be a typo in the doc.
3346 * For now, set bit 27 for both ch0 and ch1 when selecting the
3347 * unique transition scale.
3348 */
f72df8db
VS
3349 for (i = 0; i < 4; i++) {
3350 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3351 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3352 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3353 else
3354 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3355 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3356 }
3357
3358 /* Start swing calculation */
1966e59e
VS
3359 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3360 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3361 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3362
3363 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3364 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3365 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3366
3367 /* LRC Bypass */
3368 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3369 val |= DPIO_LRC_BYPASS;
3370 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3371
a580516d 3372 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3373
3374 return 0;
3375}
3376
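/*
 * Work out the next voltage swing and pre-emphasis values from the
 * sink's adjust requests in link_status: take the highest level
 * requested on any lane, clamp it to what the source supports, and set
 * the MAX_*_REACHED flags so the sink knows we cannot go any higher.
 */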
a4fc5ed6 3377static void
0301b3ac
JN
3378intel_get_adjust_train(struct intel_dp *intel_dp,
3379 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3380{
3381 uint8_t v = 0;
3382 uint8_t p = 0;
3383 int lane;
1a2eb460
KP
3384 uint8_t voltage_max;
3385 uint8_t preemph_max;
a4fc5ed6 3386
901c2daf 3387 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3388 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3389 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3390
3391 if (this_v > v)
3392 v = this_v;
3393 if (this_p > p)
3394 p = this_p;
3395 }
3396
1a2eb460 3397 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3398 if (v >= voltage_max)
3399 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3400
1a2eb460
KP
3401 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3402 if (p >= preemph_max)
3403 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3404
3405 for (lane = 0; lane < 4; lane++)
33a34e4e 3406 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3407}
3408
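/* Default DP voltage swing and pre-emphasis control via the DP port register bits */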
3409static uint32_t
5829975c 3410gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3411{
3cf2efb1 3412 uint32_t signal_levels = 0;
a4fc5ed6 3413
3cf2efb1 3414 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3416 default:
3417 signal_levels |= DP_VOLTAGE_0_4;
3418 break;
bd60018a 3419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3420 signal_levels |= DP_VOLTAGE_0_6;
3421 break;
bd60018a 3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3423 signal_levels |= DP_VOLTAGE_0_8;
3424 break;
bd60018a 3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3426 signal_levels |= DP_VOLTAGE_1_2;
3427 break;
3428 }
3cf2efb1 3429 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3430 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3431 default:
3432 signal_levels |= DP_PRE_EMPHASIS_0;
3433 break;
bd60018a 3434 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3435 signal_levels |= DP_PRE_EMPHASIS_3_5;
3436 break;
bd60018a 3437 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3438 signal_levels |= DP_PRE_EMPHASIS_6;
3439 break;
bd60018a 3440 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3441 signal_levels |= DP_PRE_EMPHASIS_9_5;
3442 break;
3443 }
3444 return signal_levels;
3445}
3446
e3421a18
ZW
3447/* Gen6's DP voltage swing and pre-emphasis control */
3448static uint32_t
5829975c 3449gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3450{
3c5a62b5
YL
3451 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3452 DP_TRAIN_PRE_EMPHASIS_MASK);
3453 switch (signal_levels) {
bd60018a
SJ
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3456 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3458 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3461 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3464 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3467 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3468 default:
3c5a62b5
YL
3469 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3470 "0x%x\n", signal_levels);
3471 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3472 }
3473}
3474
1a2eb460
KP
3475/* Gen7's DP voltage swing and pre-emphasis control */
3476static uint32_t
5829975c 3477gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3478{
3479 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3480 DP_TRAIN_PRE_EMPHASIS_MASK);
3481 switch (signal_levels) {
bd60018a 3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3483 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3485 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3487 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3488
bd60018a 3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3490 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3492 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3493
bd60018a 3494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3495 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3497 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3498
3499 default:
3500 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3501 "0x%x\n", signal_levels);
3502 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3503 }
3504}
3505
f0a3424e
PZ
3506/* Properly updates "DP" with the correct signal levels. */
3507static void
3508intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509{
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3511 enum port port = intel_dig_port->port;
f0a3424e 3512 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3513 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3514 uint8_t train_set = intel_dp->train_set[0];
3515
f8896f5d
DW
3516 if (HAS_DDI(dev)) {
3517 signal_levels = ddi_signal_levels(intel_dp);
3518
3519 if (IS_BROXTON(dev))
3520 signal_levels = 0;
3521 else
3522 mask = DDI_BUF_EMP_MASK;
e4a1d846 3523 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3524 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3525 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3526 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3527 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3528 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3529 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3530 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3531 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3532 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3533 } else {
5829975c 3534 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3535 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3536 }
3537
96fb9f9b
VK
3538 if (mask)
3539 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3540
3541 DRM_DEBUG_KMS("Using vswing level %d\n",
3542 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3543 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3544 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3545 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3546
3547 *DP = (*DP & ~mask) | signal_levels;
3548}
3549
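/*
 * Program the requested training pattern on the source and mirror it to
 * the sink: DP_TRAINING_PATTERN_SET is written, followed by the
 * per-lane DP_TRAINING_LANEx_SET values except when training is being
 * disabled.
 */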
a4fc5ed6 3550static bool
ea5b213a 3551intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3552 uint32_t *DP,
58e10eb9 3553 uint8_t dp_train_pat)
a4fc5ed6 3554{
174edf1f 3555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3556 struct drm_i915_private *dev_priv =
3557 to_i915(intel_dig_port->base.base.dev);
2cdfe6c8
JN
3558 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3559 int ret, len;
a4fc5ed6 3560
7b13b58a 3561 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3562
70aff66c 3563 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3564 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3565
2cdfe6c8
JN
3566 buf[0] = dp_train_pat;
3567 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3568 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3569 /* don't write DP_TRAINING_LANEx_SET on disable */
3570 len = 1;
3571 } else {
3572 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
901c2daf
VS
3573 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3574 len = intel_dp->lane_count + 1;
47ea7542 3575 }
a4fc5ed6 3576
9d1a1031
JN
3577 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3578 buf, len);
2cdfe6c8
JN
3579
3580 return ret == len;
a4fc5ed6
KP
3581}
3582
70aff66c
JN
3583static bool
3584intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3585 uint8_t dp_train_pat)
3586{
4e96c977
MK
3587 if (!intel_dp->train_set_valid)
3588 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3589 intel_dp_set_signal_levels(intel_dp, DP);
3590 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3591}
3592
3593static bool
3594intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3595 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3596{
3597 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3598 struct drm_i915_private *dev_priv =
3599 to_i915(intel_dig_port->base.base.dev);
70aff66c
JN
3600 int ret;
3601
3602 intel_get_adjust_train(intel_dp, link_status);
3603 intel_dp_set_signal_levels(intel_dp, DP);
3604
3605 I915_WRITE(intel_dp->output_reg, *DP);
3606 POSTING_READ(intel_dp->output_reg);
3607
9d1a1031 3608 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
901c2daf 3609 intel_dp->train_set, intel_dp->lane_count);
70aff66c 3610
901c2daf 3611 return ret == intel_dp->lane_count;
70aff66c
JN
3612}
3613
3ab9c637
ID
3614static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3615{
3616 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3617 struct drm_device *dev = intel_dig_port->base.base.dev;
3618 struct drm_i915_private *dev_priv = dev->dev_private;
3619 enum port port = intel_dig_port->port;
3620 uint32_t val;
3621
3622 if (!HAS_DDI(dev))
3623 return;
3624
3625 val = I915_READ(DP_TP_CTL(port));
3626 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3627 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3628 I915_WRITE(DP_TP_CTL(port), val);
3629
3630 /*
3631 * On PORT_A we can only have eDP in SST mode. There, the only reason
3632 * we need to set idle transmission mode is to work around a HW issue
3633 * where we enable the pipe while not in idle link-training mode.
3634 * In this case there is a requirement to wait for a minimum number of
3635 * idle patterns to be sent.
3636 */
3637 if (port == PORT_A)
3638 return;
3639
3640 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3641 1))
3642 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3643}
3644
33a34e4e 3645/* Enable corresponding port and start training pattern 1 */
c19b0669 3646void
33a34e4e 3647intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3648{
da63a9f2 3649 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3650 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3651 int i;
3652 uint8_t voltage;
cdb0e95b 3653 int voltage_tries, loop_tries;
ea5b213a 3654 uint32_t DP = intel_dp->DP;
6aba5b6c 3655 uint8_t link_config[2];
04a60f9f 3656 uint8_t link_bw, rate_select;
a4fc5ed6 3657
affa9354 3658 if (HAS_DDI(dev))
c19b0669
PZ
3659 intel_ddi_prepare_link_retrain(encoder);
3660
901c2daf 3661 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
04a60f9f
VS
3662 &link_bw, &rate_select);
3663
3cf2efb1 3664 /* Write the link configuration data */
04a60f9f 3665 link_config[0] = link_bw;
901c2daf 3666 link_config[1] = intel_dp->lane_count;
6aba5b6c
JN
3667 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3668 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3670 if (intel_dp->num_sink_rates)
a8f3ef61 3671 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
04a60f9f 3672 &rate_select, 1);
6aba5b6c
JN
3673
3674 link_config[0] = 0;
3675 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3676 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3677
3678 DP |= DP_PORT_EN;
1a2eb460 3679
70aff66c
JN
3680 /* clock recovery */
3681 if (!intel_dp_reset_link_train(intel_dp, &DP,
3682 DP_TRAINING_PATTERN_1 |
3683 DP_LINK_SCRAMBLING_DISABLE)) {
3684 DRM_ERROR("failed to enable link training\n");
3685 return;
3686 }
3687
a4fc5ed6 3688 voltage = 0xff;
cdb0e95b
KP
3689 voltage_tries = 0;
3690 loop_tries = 0;
a4fc5ed6 3691 for (;;) {
70aff66c 3692 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3693
a7c9655f 3694 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3695 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3696 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3697 break;
93f62dad 3698 }
a4fc5ed6 3699
901c2daf 3700 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3701 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3702 break;
3703 }
3704
4e96c977
MK
3705 /*
3706 * if we used previously trained voltage and pre-emphasis values
3707 * and we don't get clock recovery, reset link training values
3708 */
3709 if (intel_dp->train_set_valid) {
3710 DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3711 /* clear the flag as we are not reusing train set */
3712 intel_dp->train_set_valid = false;
3713 if (!intel_dp_reset_link_train(intel_dp, &DP,
3714 DP_TRAINING_PATTERN_1 |
3715 DP_LINK_SCRAMBLING_DISABLE)) {
3716 DRM_ERROR("failed to enable link training\n");
3717 return;
3718 }
3719 continue;
3720 }
3721
3cf2efb1 3722 /* Check to see if we've tried the max voltage */
901c2daf 3723 for (i = 0; i < intel_dp->lane_count; i++)
3cf2efb1 3724 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3725 break;
901c2daf 3726 if (i == intel_dp->lane_count) {
b06fbda3
DV
3727 ++loop_tries;
3728 if (loop_tries == 5) {
3def84b3 3729 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3730 break;
3731 }
70aff66c
JN
3732 intel_dp_reset_link_train(intel_dp, &DP,
3733 DP_TRAINING_PATTERN_1 |
3734 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3735 voltage_tries = 0;
3736 continue;
3737 }
a4fc5ed6 3738
3cf2efb1 3739 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3740 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3741 ++voltage_tries;
b06fbda3 3742 if (voltage_tries == 5) {
3def84b3 3743 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3744 break;
3745 }
3746 } else
3747 voltage_tries = 0;
3748 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3749
70aff66c
JN
3750 /* Update training set as requested by target */
3751 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3752 DRM_ERROR("failed to update link training\n");
3753 break;
3754 }
a4fc5ed6
KP
3755 }
3756
33a34e4e
JB
3757 intel_dp->DP = DP;
3758}
3759
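/*
 * Channel equalization phase: switch to training pattern 2 (or 3 for
 * HBR2/TPS3 capable sinks) and keep updating the drive settings until
 * the sink reports channel EQ done, restarting clock recovery if it is
 * lost or too many attempts fail.
 */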
c19b0669 3760void
33a34e4e
JB
3761intel_dp_complete_link_train(struct intel_dp *intel_dp)
3762{
33a34e4e 3763 bool channel_eq = false;
37f80975 3764 int tries, cr_tries;
33a34e4e 3765 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3766 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3767
a79b8165 3768 /* Use Training Pattern 3 for HBR2 or for DP 1.2+ sinks that support it */
901c2daf 3769 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
06ea66b6 3770 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3771
a4fc5ed6 3772 /* channel equalization */
70aff66c 3773 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3774 training_pattern |
70aff66c
JN
3775 DP_LINK_SCRAMBLING_DISABLE)) {
3776 DRM_ERROR("failed to start channel equalization\n");
3777 return;
3778 }
3779
a4fc5ed6 3780 tries = 0;
37f80975 3781 cr_tries = 0;
a4fc5ed6
KP
3782 channel_eq = false;
3783 for (;;) {
70aff66c 3784 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3785
37f80975
JB
3786 if (cr_tries > 5) {
3787 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3788 break;
3789 }
3790
a7c9655f 3791 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3792 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3793 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3794 break;
70aff66c 3795 }
a4fc5ed6 3796
37f80975 3797 /* Make sure clock is still ok */
90a6b7b0 3798 if (!drm_dp_clock_recovery_ok(link_status,
901c2daf 3799 intel_dp->lane_count)) {
4e96c977 3800 intel_dp->train_set_valid = false;
37f80975 3801 intel_dp_start_link_train(intel_dp);
70aff66c 3802 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3803 training_pattern |
70aff66c 3804 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3805 cr_tries++;
3806 continue;
3807 }
3808
90a6b7b0 3809 if (drm_dp_channel_eq_ok(link_status,
901c2daf 3810 intel_dp->lane_count)) {
3cf2efb1
CW
3811 channel_eq = true;
3812 break;
3813 }
a4fc5ed6 3814
37f80975
JB
3815 /* Try 5 times, then try clock recovery if that fails */
3816 if (tries > 5) {
4e96c977 3817 intel_dp->train_set_valid = false;
37f80975 3818 intel_dp_start_link_train(intel_dp);
70aff66c 3819 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3820 training_pattern |
70aff66c 3821 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3822 tries = 0;
3823 cr_tries++;
3824 continue;
3825 }
a4fc5ed6 3826
70aff66c
JN
3827 /* Update training set as requested by target */
3828 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3829 DRM_ERROR("failed to update link training\n");
3830 break;
3831 }
3cf2efb1 3832 ++tries;
869184a6 3833 }
3cf2efb1 3834
3ab9c637
ID
3835 intel_dp_set_idle_link_train(intel_dp);
3836
3837 intel_dp->DP = DP;
3838
4e96c977 3839 if (channel_eq) {
5fa836a9 3840 intel_dp->train_set_valid = true;
07f42258 3841 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3842 }
3ab9c637
ID
3843}
3844
3845void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3846{
70aff66c 3847 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3848 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3849}
3850
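/*
 * Shut the link down on non-DDI platforms: put the port into idle link
 * training, disable it, apply the IBX transcoder A workaround if
 * needed, and wait out the panel power down delay.
 */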
3851static void
ea5b213a 3852intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3853{
da63a9f2 3854 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3855 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3856 enum port port = intel_dig_port->port;
da63a9f2 3857 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3858 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3859 uint32_t DP = intel_dp->DP;
a4fc5ed6 3860
bc76e320 3861 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3862 return;
3863
0c33d8d7 3864 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3865 return;
3866
28c97730 3867 DRM_DEBUG_KMS("\n");
32f9d658 3868
39e5fa88
VS
3869 if ((IS_GEN7(dev) && port == PORT_A) ||
3870 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3871 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3872 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3873 } else {
aad3d14d
VS
3874 if (IS_CHERRYVIEW(dev))
3875 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3876 else
3877 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3878 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3879 }
1612c8bd 3880 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3881 POSTING_READ(intel_dp->output_reg);
5eb08b69 3882
1612c8bd
VS
3883 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3884 I915_WRITE(intel_dp->output_reg, DP);
3885 POSTING_READ(intel_dp->output_reg);
3886
3887 /*
3888 * HW workaround for IBX, we need to move the port
3889 * to transcoder A after disabling it to allow the
3890 * matching HDMI port to be enabled on transcoder A.
3891 */
3892 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3893 /* always enable with pattern 1 (as per spec) */
3894 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3895 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3896 I915_WRITE(intel_dp->output_reg, DP);
3897 POSTING_READ(intel_dp->output_reg);
3898
3899 DP &= ~DP_PORT_EN;
5bddd17f 3900 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3901 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3902 }
3903
f01eca2e 3904 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3905}
3906
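/*
 * Read and cache the sink's DPCD receiver capabilities, along with the
 * optional bits used later: PSR/PSR2 support, TPS3 support, the eDP 1.4
 * supported link rate table and any downstream port info.
 */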
26d61aad
KP
3907static bool
3908intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3909{
a031d709
RV
3910 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3911 struct drm_device *dev = dig_port->base.base.dev;
3912 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3913 uint8_t rev;
a031d709 3914
9d1a1031
JN
3915 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3916 sizeof(intel_dp->dpcd)) < 0)
edb39244 3917 return false; /* aux transfer failed */
92fd8fd1 3918
a8e98153 3919 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3920
edb39244
AJ
3921 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3922 return false; /* DPCD not present */
3923
2293bb5c
SK
3924 /* Check if the panel supports PSR */
3925 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3926 if (is_edp(intel_dp)) {
9d1a1031
JN
3927 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3928 intel_dp->psr_dpcd,
3929 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3930 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3931 dev_priv->psr.sink_support = true;
50003939 3932 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3933 }
474d1ec4
SJ
3934
3935 if (INTEL_INFO(dev)->gen >= 9 &&
3936 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3937 uint8_t frame_sync_cap;
3938
3939 dev_priv->psr.sink_support = true;
3940 intel_dp_dpcd_read_wake(&intel_dp->aux,
3941 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3942 &frame_sync_cap, 1);
3943 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3944 /* PSR2 needs frame sync as well */
3945 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3946 DRM_DEBUG_KMS("PSR2 %s on sink\n",
3947 dev_priv->psr.psr2_support ? "supported" : "not supported");
3948 }
50003939
JN
3949 }
3950
7809a611 3951 /* Training Pattern 3 support, both source and sink */
06ea66b6 3952 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3953 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3954 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3955 intel_dp->use_tps3 = true;
f8d8a672 3956 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3957 } else
3958 intel_dp->use_tps3 = false;
3959
fc0f8e25
SJ
3960 /* Intermediate frequency support */
3961 if (is_edp(intel_dp) &&
3962 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3963 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3964 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3965 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3966 int i;
3967
fc0f8e25
SJ
3968 intel_dp_dpcd_read_wake(&intel_dp->aux,
3969 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3970 sink_rates,
3971 sizeof(sink_rates));
ea2d8a42 3972
94ca719e
VS
3973 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3974 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3975
3976 if (val == 0)
3977 break;
3978
af77b974
SJ
3979 /* Value read is in units of 200 kHz while drm clock is saved in deca-kHz */
3980 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3981 }
94ca719e 3982 intel_dp->num_sink_rates = i;
fc0f8e25 3983 }
0336400e
VS
3984
3985 intel_dp_print_rates(intel_dp);
3986
edb39244
AJ
3987 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3988 DP_DWN_STRM_PORT_PRESENT))
3989 return true; /* native DP sink */
3990
3991 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3992 return true; /* no per-port downstream info */
3993
9d1a1031
JN
3994 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3995 intel_dp->downstream_ports,
3996 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3997 return false; /* downstream port status fetch failed */
3998
3999 return true;
92fd8fd1
KP
4000}
4001
0d198328
AJ
4002static void
4003intel_dp_probe_oui(struct intel_dp *intel_dp)
4004{
4005 u8 buf[3];
4006
4007 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4008 return;
4009
9d1a1031 4010 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4011 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4012 buf[0], buf[1], buf[2]);
4013
9d1a1031 4014 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4015 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4016 buf[0], buf[1], buf[2]);
4017}
4018
0e32b39c
DA
4019static bool
4020intel_dp_probe_mst(struct intel_dp *intel_dp)
4021{
4022 u8 buf[1];
4023
4024 if (!intel_dp->can_mst)
4025 return false;
4026
4027 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4028 return false;
4029
0e32b39c
DA
4030 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4031 if (buf[0] & DP_MST_CAP) {
4032 DRM_DEBUG_KMS("Sink is MST capable\n");
4033 intel_dp->is_mst = true;
4034 } else {
4035 DRM_DEBUG_KMS("Sink is not MST capable\n");
4036 intel_dp->is_mst = false;
4037 }
4038 }
0e32b39c
DA
4039
4040 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4041 return intel_dp->is_mst;
4042}
4043
e5a1cab5 4044static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4045{
082dcc7c
RV
4046 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4047 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4048 u8 buf;
e5a1cab5 4049 int ret = 0;
d2e216d0 4050
082dcc7c
RV
4051 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4052 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4053 ret = -EIO;
4054 goto out;
4373f0f2
PZ
4055 }
4056
082dcc7c 4057 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4058 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4059 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4060 ret = -EIO;
4061 goto out;
4062 }
d2e216d0 4063
621d4c76 4064 intel_dp->sink_crc.started = false;
e5a1cab5 4065 out:
082dcc7c 4066 hsw_enable_ips(intel_crtc);
e5a1cab5 4067 return ret;
082dcc7c
RV
4068}
4069
4070static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4071{
4072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4073 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4074 u8 buf;
e5a1cab5
RV
4075 int ret;
4076
621d4c76 4077 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
4078 ret = intel_dp_sink_crc_stop(intel_dp);
4079 if (ret)
4080 return ret;
4081 }
082dcc7c
RV
4082
4083 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4084 return -EIO;
4085
4086 if (!(buf & DP_TEST_CRC_SUPPORTED))
4087 return -ENOTTY;
4088
621d4c76
RV
4089 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4090
082dcc7c
RV
4091 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4092 return -EIO;
4093
4094 hsw_disable_ips(intel_crtc);
1dda5f93 4095
9d1a1031 4096 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4097 buf | DP_TEST_SINK_START) < 0) {
4098 hsw_enable_ips(intel_crtc);
4099 return -EIO;
4373f0f2
PZ
4100 }
4101
621d4c76 4102 intel_dp->sink_crc.started = true;
082dcc7c
RV
4103 return 0;
4104}
4105
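/*
 * Fetch the CRC the sink computed over the transmitted frame. The
 * TEST_SINK_MISC count is polled across vblanks, and stale or repeated
 * CRC values are retried a limited number of times.
 */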
4106int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4107{
4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4109 struct drm_device *dev = dig_port->base.base.dev;
4110 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4111 u8 buf;
621d4c76 4112 int count, ret;
082dcc7c 4113 int attempts = 6;
aabc95dc 4114 bool old_equal_new;
082dcc7c
RV
4115
4116 ret = intel_dp_sink_crc_start(intel_dp);
4117 if (ret)
4118 return ret;
4119
ad9dc91b 4120 do {
621d4c76
RV
4121 intel_wait_for_vblank(dev, intel_crtc->pipe);
4122
1dda5f93 4123 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4124 DP_TEST_SINK_MISC, &buf) < 0) {
4125 ret = -EIO;
afe0d67e 4126 goto stop;
4373f0f2 4127 }
621d4c76 4128 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4129
621d4c76
RV
4130 /*
4131 * Count might be reset during the loop. In this case
4132 * last known count needs to be reset as well.
4133 */
4134 if (count == 0)
4135 intel_dp->sink_crc.last_count = 0;
4136
4137 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4138 ret = -EIO;
4139 goto stop;
4140 }
aabc95dc
RV
4141
4142 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4143 !memcmp(intel_dp->sink_crc.last_crc, crc,
4144 6 * sizeof(u8)));
4145
4146 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
4147
4148 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4149 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
4150
4151 if (attempts == 0) {
aabc95dc
RV
4152 if (old_equal_new) {
4153 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4154 } else {
4155 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4156 ret = -ETIMEDOUT;
4157 goto stop;
4158 }
ad9dc91b 4159 }
d2e216d0 4160
afe0d67e 4161stop:
082dcc7c 4162 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4163 return ret;
d2e216d0
RV
4164}
4165
a60f0e38
JB
4166static bool
4167intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4168{
9d1a1031
JN
4169 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4170 DP_DEVICE_SERVICE_IRQ_VECTOR,
4171 sink_irq_vector, 1) == 1;
a60f0e38
JB
4172}
4173
0e32b39c
DA
4174static bool
4175intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4176{
4177 int ret;
4178
4179 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4180 DP_SINK_COUNT_ESI,
4181 sink_irq_vector, 14);
4182 if (ret != 14)
4183 return false;
4184
4185 return true;
4186}
4187
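/*
 * DP compliance (automated test) request handlers. Only the EDID read
 * test is implemented; link training requests are simply acked and the
 * video pattern and PHY pattern tests are nacked.
 */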
c5d5ab7a
TP
4188static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4189{
4190 uint8_t test_result = DP_TEST_ACK;
4191 return test_result;
4192}
4193
4194static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4195{
4196 uint8_t test_result = DP_TEST_NAK;
4197 return test_result;
4198}
4199
4200static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4201{
c5d5ab7a 4202 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4203 struct intel_connector *intel_connector = intel_dp->attached_connector;
4204 struct drm_connector *connector = &intel_connector->base;
4205
4206 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4207 connector->edid_corrupt ||
559be30c
TP
4208 intel_dp->aux.i2c_defer_count > 6) {
4209 /* Check EDID read for NACKs, DEFERs and corruption
4210 * (DP CTS 1.2 Core r1.1)
4211 * 4.2.2.4 : Failed EDID read, I2C_NAK
4212 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4213 * 4.2.2.6 : EDID corruption detected
4214 * Use failsafe mode for all cases
4215 */
4216 if (intel_dp->aux.i2c_nack_count > 0 ||
4217 intel_dp->aux.i2c_defer_count > 0)
4218 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4219 intel_dp->aux.i2c_nack_count,
4220 intel_dp->aux.i2c_defer_count);
4221 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4222 } else {
f79b468e
TS
4223 struct edid *block = intel_connector->detect_edid;
4224
4225 /* We have to write the checksum
4226 * of the last block read
4227 */
4228 block += intel_connector->detect_edid->extensions;
4229
559be30c
TP
4230 if (!drm_dp_dpcd_write(&intel_dp->aux,
4231 DP_TEST_EDID_CHECKSUM,
f79b468e 4232 &block->checksum,
5a1cc655 4233 1))
559be30c
TP
4234 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4235
4236 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4237 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4238 }
4239
4240 /* Set test active flag here so userspace doesn't interrupt things */
4241 intel_dp->compliance_test_active = 1;
4242
c5d5ab7a
TP
4243 return test_result;
4244}
4245
4246static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4247{
c5d5ab7a
TP
4248 uint8_t test_result = DP_TEST_NAK;
4249 return test_result;
4250}
4251
4252static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4253{
4254 uint8_t response = DP_TEST_NAK;
4255 uint8_t rxdata = 0;
4256 int status = 0;
4257
559be30c 4258 intel_dp->compliance_test_active = 0;
c5d5ab7a 4259 intel_dp->compliance_test_type = 0;
559be30c
TP
4260 intel_dp->compliance_test_data = 0;
4261
c5d5ab7a
TP
4262 intel_dp->aux.i2c_nack_count = 0;
4263 intel_dp->aux.i2c_defer_count = 0;
4264
4265 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4266 if (status <= 0) {
4267 DRM_DEBUG_KMS("Could not read test request from sink\n");
4268 goto update_status;
4269 }
4270
4271 switch (rxdata) {
4272 case DP_TEST_LINK_TRAINING:
4273 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4274 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4275 response = intel_dp_autotest_link_training(intel_dp);
4276 break;
4277 case DP_TEST_LINK_VIDEO_PATTERN:
4278 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4279 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4280 response = intel_dp_autotest_video_pattern(intel_dp);
4281 break;
4282 case DP_TEST_LINK_EDID_READ:
4283 DRM_DEBUG_KMS("EDID test requested\n");
4284 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4285 response = intel_dp_autotest_edid(intel_dp);
4286 break;
4287 case DP_TEST_LINK_PHY_TEST_PATTERN:
4288 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4289 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4290 response = intel_dp_autotest_phy_pattern(intel_dp);
4291 break;
4292 default:
4293 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4294 break;
4295 }
4296
4297update_status:
4298 status = drm_dp_dpcd_write(&intel_dp->aux,
4299 DP_TEST_RESPONSE,
4300 &response, 1);
4301 if (status <= 0)
4302 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4303}
4304
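/*
 * Service an MST short pulse: read the ESI registers, retrain the link
 * if channel EQ was lost, and hand any topology IRQs to the MST
 * manager, acking them back to the sink.
 */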
0e32b39c
DA
4305static int
4306intel_dp_check_mst_status(struct intel_dp *intel_dp)
4307{
4308 bool bret;
4309
4310 if (intel_dp->is_mst) {
4311 u8 esi[16] = { 0 };
4312 int ret = 0;
4313 int retry;
4314 bool handled;
4315 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4316go_again:
4317 if (bret == true) {
4318
4319 /* check link status - esi[10] = 0x200c */
90a6b7b0 4320 if (intel_dp->active_mst_links &&
901c2daf 4321 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4322 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4323 intel_dp_start_link_train(intel_dp);
4324 intel_dp_complete_link_train(intel_dp);
4325 intel_dp_stop_link_train(intel_dp);
4326 }
4327
6f34cc39 4328 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4329 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4330
4331 if (handled) {
4332 for (retry = 0; retry < 3; retry++) {
4333 int wret;
4334 wret = drm_dp_dpcd_write(&intel_dp->aux,
4335 DP_SINK_COUNT_ESI+1,
4336 &esi[1], 3);
4337 if (wret == 3) {
4338 break;
4339 }
4340 }
4341
4342 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4343 if (bret == true) {
6f34cc39 4344 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4345 goto go_again;
4346 }
4347 } else
4348 ret = 0;
4349
4350 return ret;
4351 } else {
4352 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4353 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4354 intel_dp->is_mst = false;
4355 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4356 /* send a hotplug event */
4357 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4358 }
4359 }
4360 return -EINVAL;
4361}
4362
a4fc5ed6
KP
4363/*
4364 * According to DP spec
4365 * 5.1.2:
4366 * 1. Read DPCD
4367 * 2. Configure link according to Receiver Capabilities
4368 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4369 * 4. Check link status on receipt of hot-plug interrupt
4370 */
a5146200 4371static void
ea5b213a 4372intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4373{
5b215bcf 4374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4375 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4376 u8 sink_irq_vector;
93f62dad 4377 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4378
5b215bcf
DA
4379 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4380
e02f9a06 4381 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4382 return;
4383
1a125d8a
ID
4384 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4385 return;
4386
92fd8fd1 4387 /* Try to read receiver status if the link appears to be up */
93f62dad 4388 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4389 return;
4390 }
4391
92fd8fd1 4392 /* Now read the DPCD to see if it's actually running */
26d61aad 4393 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4394 return;
4395 }
4396
a60f0e38
JB
4397 /* Try to read the source of the interrupt */
4398 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4399 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4400 /* Clear interrupt source */
9d1a1031
JN
4401 drm_dp_dpcd_writeb(&intel_dp->aux,
4402 DP_DEVICE_SERVICE_IRQ_VECTOR,
4403 sink_irq_vector);
a60f0e38
JB
4404
4405 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4406 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4407 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4408 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4409 }
4410
901c2daf 4411 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4412 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4413 intel_encoder->base.name);
33a34e4e
JB
4414 intel_dp_start_link_train(intel_dp);
4415 intel_dp_complete_link_train(intel_dp);
3ab9c637 4416 intel_dp_stop_link_train(intel_dp);
33a34e4e 4417 }
a4fc5ed6 4418}
a4fc5ed6 4419
caf9ab24 4420/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4421static enum drm_connector_status
26d61aad 4422intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4423{
caf9ab24 4424 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4425 uint8_t type;
4426
4427 if (!intel_dp_get_dpcd(intel_dp))
4428 return connector_status_disconnected;
4429
4430 /* if there's no downstream port, we're done */
4431 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4432 return connector_status_connected;
caf9ab24
AJ
4433
4434 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4435 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4436 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4437 uint8_t reg;
9d1a1031
JN
4438
4439 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4440 &reg, 1) < 0)
caf9ab24 4441 return connector_status_unknown;
9d1a1031 4442
23235177
AJ
4443 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4444 : connector_status_disconnected;
caf9ab24
AJ
4445 }
4446
4447 /* If no HPD, poke DDC gently */
0b99836f 4448 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4449 return connector_status_connected;
caf9ab24
AJ
4450
4451 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4452 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4453 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4454 if (type == DP_DS_PORT_TYPE_VGA ||
4455 type == DP_DS_PORT_TYPE_NON_EDID)
4456 return connector_status_unknown;
4457 } else {
4458 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4459 DP_DWN_STRM_PORT_TYPE_MASK;
4460 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4461 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4462 return connector_status_unknown;
4463 }
caf9ab24
AJ
4464
4465 /* Anything else is out of spec, warn and ignore */
4466 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4467 return connector_status_disconnected;
71ba9000
AJ
4468}
4469
d410b56d
CW
4470static enum drm_connector_status
4471edp_detect(struct intel_dp *intel_dp)
4472{
4473 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4474 enum drm_connector_status status;
4475
4476 status = intel_panel_detect(dev);
4477 if (status == connector_status_unknown)
4478 status = connector_status_connected;
4479
4480 return status;
4481}
4482
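/*
 * Live status check for PCH platforms: map the port to its SDEISR
 * hotplug bit (IBX vs CPT bit layout) and report whether it is
 * currently asserted. Port A is always reported as connected.
 */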
b93433cc
JN
4483static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4484 struct intel_digital_port *port)
5eb08b69 4485{
b93433cc 4486 u32 bit;
01cb9ea6 4487
b93433cc
JN
4488 if (HAS_PCH_IBX(dev_priv->dev)) {
4489 switch (port->port) {
196cabd4
JN
4490 case PORT_A:
4491 return true;
b93433cc
JN
4492 case PORT_B:
4493 bit = SDE_PORTB_HOTPLUG;
4494 break;
4495 case PORT_C:
4496 bit = SDE_PORTC_HOTPLUG;
4497 break;
4498 case PORT_D:
4499 bit = SDE_PORTD_HOTPLUG;
4500 break;
4501 default:
196cabd4
JN
4502 MISSING_CASE(port->port);
4503 return false;
b93433cc
JN
4504 }
4505 } else {
4506 switch (port->port) {
196cabd4
JN
4507 case PORT_A:
4508 return true;
b93433cc
JN
4509 case PORT_B:
4510 bit = SDE_PORTB_HOTPLUG_CPT;
4511 break;
4512 case PORT_C:
4513 bit = SDE_PORTC_HOTPLUG_CPT;
4514 break;
4515 case PORT_D:
4516 bit = SDE_PORTD_HOTPLUG_CPT;
4517 break;
4518 default:
196cabd4
JN
4519 MISSING_CASE(port->port);
4520 return false;
b93433cc
JN
4521 }
4522 }
1b469639 4523
b93433cc 4524 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4525}
4526
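/*
 * Live status check for g4x/VLV: test the port's live status bit in
 * PORT_HOTPLUG_STAT (VLV uses a different bit layout than g4x).
 */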
7e66bcf2 4527static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4528 struct intel_digital_port *port)
a4fc5ed6 4529{
10f76a38 4530 uint32_t bit;
5eb08b69 4531
7e66bcf2 4532 if (IS_VALLEYVIEW(dev_priv)) {
1d245987 4533 switch (port->port) {
232a6ee9
TP
4534 case PORT_B:
4535 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4536 break;
4537 case PORT_C:
4538 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4539 break;
4540 case PORT_D:
4541 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4542 break;
4543 default:
1d245987
JN
4544 MISSING_CASE(port->port);
4545 return false;
232a6ee9
TP
4546 }
4547 } else {
1d245987 4548 switch (port->port) {
232a6ee9
TP
4549 case PORT_B:
4550 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4551 break;
4552 case PORT_C:
4553 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4554 break;
4555 case PORT_D:
4556 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4557 break;
4558 default:
1d245987
JN
4559 MISSING_CASE(port->port);
4560 return false;
232a6ee9 4561 }
a4fc5ed6
KP
4562 }
4563
1d245987 4564 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4565}
4566
7e66bcf2
JN
4567/*
4568 * intel_digital_port_connected - is the specified port connected?
4569 * @dev_priv: i915 private structure
4570 * @port: the port to test
4571 *
4572 * Return %true if @port is connected, %false otherwise.
4573 */
4574static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4575 struct intel_digital_port *port)
4576{
4577 if (HAS_PCH_SPLIT(dev_priv))
4578 return ibx_digital_port_connected(dev_priv, port);
4579 else
4580 return g4x_digital_port_connected(dev_priv, port);
4581}
4582
b93433cc
JN
4583static enum drm_connector_status
4584ironlake_dp_detect(struct intel_dp *intel_dp)
4585{
4586 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4587 struct drm_i915_private *dev_priv = dev->dev_private;
4588 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4589
7e66bcf2 4590 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4591 return connector_status_disconnected;
4592
4593 return intel_dp_detect_dpcd(intel_dp);
4594}
4595
2a592bec
DA
4596static enum drm_connector_status
4597g4x_dp_detect(struct intel_dp *intel_dp)
4598{
4599 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4600 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4601
4602 /* Can't disconnect eDP, but you can close the lid... */
4603 if (is_edp(intel_dp)) {
4604 enum drm_connector_status status;
4605
4606 status = intel_panel_detect(dev);
4607 if (status == connector_status_unknown)
4608 status = connector_status_connected;
4609 return status;
4610 }
4611
7e66bcf2 4612 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4613 return connector_status_disconnected;
4614
26d61aad 4615 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4616}
4617
8c241fef 4618static struct edid *
beb60608 4619intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4620{
beb60608 4621 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4622
9cd300e0
JN
4623 /* use cached edid if we have one */
4624 if (intel_connector->edid) {
9cd300e0
JN
4625 /* invalid edid */
4626 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4627 return NULL;
4628
55e9edeb 4629 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4630 } else
4631 return drm_get_edid(&intel_connector->base,
4632 &intel_dp->aux.ddc);
4633}
8c241fef 4634
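/* Cache the EDID for this detect cycle and derive the audio capability from it. */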
beb60608
CW
4635static void
4636intel_dp_set_edid(struct intel_dp *intel_dp)
4637{
4638 struct intel_connector *intel_connector = intel_dp->attached_connector;
4639 struct edid *edid;
8c241fef 4640
beb60608
CW
4641 edid = intel_dp_get_edid(intel_dp);
4642 intel_connector->detect_edid = edid;
4643
4644 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4645 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4646 else
4647 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4648}
4649
beb60608
CW
4650static void
4651intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4652{
beb60608 4653 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4654
beb60608
CW
4655 kfree(intel_connector->detect_edid);
4656 intel_connector->detect_edid = NULL;
9cd300e0 4657
beb60608
CW
4658 intel_dp->has_audio = false;
4659}
d6f24d0f 4660
beb60608
CW
4661static enum intel_display_power_domain
4662intel_dp_power_get(struct intel_dp *dp)
4663{
4664 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4665 enum intel_display_power_domain power_domain;
4666
4667 power_domain = intel_display_port_power_domain(encoder);
4668 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4669
4670 return power_domain;
4671}
d6f24d0f 4672
beb60608
CW
4673static void
4674intel_dp_power_put(struct intel_dp *dp,
4675 enum intel_display_power_domain power_domain)
4676{
4677 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4678 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4679}
4680
a9756bb5
ZW
4681static enum drm_connector_status
4682intel_dp_detect(struct drm_connector *connector, bool force)
4683{
4684 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4686 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4687 struct drm_device *dev = connector->dev;
a9756bb5 4688 enum drm_connector_status status;
671dedd2 4689 enum intel_display_power_domain power_domain;
0e32b39c 4690 bool ret;
09b1eb13 4691 u8 sink_irq_vector;
a9756bb5 4692
164c8598 4693 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4694 connector->base.id, connector->name);
beb60608 4695 intel_dp_unset_edid(intel_dp);
164c8598 4696
0e32b39c
DA
4697 if (intel_dp->is_mst) {
4698 /* MST devices are disconnected from a monitor POV */
4699 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4700 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4701 return connector_status_disconnected;
0e32b39c
DA
4702 }
4703
beb60608 4704 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4705
d410b56d
CW
4706 /* Can't disconnect eDP, but you can close the lid... */
4707 if (is_edp(intel_dp))
4708 status = edp_detect(intel_dp);
4709 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4710 status = ironlake_dp_detect(intel_dp);
4711 else
4712 status = g4x_dp_detect(intel_dp);
4713 if (status != connector_status_connected)
c8c8fb33 4714 goto out;
a9756bb5 4715
0d198328
AJ
4716 intel_dp_probe_oui(intel_dp);
4717
0e32b39c
DA
4718 ret = intel_dp_probe_mst(intel_dp);
4719 if (ret) {
4720 /* If we are in MST mode then this connector
4721 * won't appear connected or have anything with EDID on it */
4722 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4723 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4724 status = connector_status_disconnected;
4725 goto out;
4726 }
4727
beb60608 4728 intel_dp_set_edid(intel_dp);
a9756bb5 4729
d63885da
PZ
4730 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4731 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4732 status = connector_status_connected;
4733
09b1eb13
TP
4734 /* Try to read the source of the interrupt */
4735 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4736 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4737 /* Clear interrupt source */
4738 drm_dp_dpcd_writeb(&intel_dp->aux,
4739 DP_DEVICE_SERVICE_IRQ_VECTOR,
4740 sink_irq_vector);
4741
4742 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4743 intel_dp_handle_test_request(intel_dp);
4744 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4745 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4746 }
4747
c8c8fb33 4748out:
beb60608 4749 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4750 return status;
a4fc5ed6
KP
4751}
4752
beb60608
CW
4753static void
4754intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4755{
df0e9248 4756 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4757 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4758 enum intel_display_power_domain power_domain;
a4fc5ed6 4759
beb60608
CW
4760 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4761 connector->base.id, connector->name);
4762 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4763
beb60608
CW
4764 if (connector->status != connector_status_connected)
4765 return;
671dedd2 4766
beb60608
CW
4767 power_domain = intel_dp_power_get(intel_dp);
4768
4769 intel_dp_set_edid(intel_dp);
4770
4771 intel_dp_power_put(intel_dp, power_domain);
4772
4773 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4774 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4775}
4776
4777static int intel_dp_get_modes(struct drm_connector *connector)
4778{
4779 struct intel_connector *intel_connector = to_intel_connector(connector);
4780 struct edid *edid;
4781
4782 edid = intel_connector->detect_edid;
4783 if (edid) {
4784 int ret = intel_connector_update_modes(connector, edid);
4785 if (ret)
4786 return ret;
4787 }
32f9d658 4788
f8779fda 4789 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4790 if (is_edp(intel_attached_dp(connector)) &&
4791 intel_connector->panel.fixed_mode) {
f8779fda 4792 struct drm_display_mode *mode;
beb60608
CW
4793
4794 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4795 intel_connector->panel.fixed_mode);
f8779fda 4796 if (mode) {
32f9d658
ZW
4797 drm_mode_probed_add(connector, mode);
4798 return 1;
4799 }
4800 }
beb60608 4801
32f9d658 4802 return 0;
a4fc5ed6
KP
4803}
4804
1aad7ac0
CW
4805static bool
4806intel_dp_detect_audio(struct drm_connector *connector)
4807{
1aad7ac0 4808 bool has_audio = false;
beb60608 4809 struct edid *edid;
1aad7ac0 4810
beb60608
CW
4811 edid = to_intel_connector(connector)->detect_edid;
4812 if (edid)
1aad7ac0 4813 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4814
1aad7ac0
CW
4815 return has_audio;
4816}
4817
f684960e
CW
4818static int
4819intel_dp_set_property(struct drm_connector *connector,
4820 struct drm_property *property,
4821 uint64_t val)
4822{
e953fd7b 4823 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4824 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4825 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4826 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4827 int ret;
4828
662595df 4829 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4830 if (ret)
4831 return ret;
4832
3f43c48d 4833 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4834 int i = val;
4835 bool has_audio;
4836
4837 if (i == intel_dp->force_audio)
f684960e
CW
4838 return 0;
4839
1aad7ac0 4840 intel_dp->force_audio = i;
f684960e 4841
c3e5f67b 4842 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4843 has_audio = intel_dp_detect_audio(connector);
4844 else
c3e5f67b 4845 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4846
4847 if (has_audio == intel_dp->has_audio)
f684960e
CW
4848 return 0;
4849
1aad7ac0 4850 intel_dp->has_audio = has_audio;
f684960e
CW
4851 goto done;
4852 }
4853
e953fd7b 4854 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4855 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4856 bool old_range = intel_dp->limited_color_range;
ae4edb80 4857
55bc60db
VS
4858 switch (val) {
4859 case INTEL_BROADCAST_RGB_AUTO:
4860 intel_dp->color_range_auto = true;
4861 break;
4862 case INTEL_BROADCAST_RGB_FULL:
4863 intel_dp->color_range_auto = false;
0f2a2a75 4864 intel_dp->limited_color_range = false;
55bc60db
VS
4865 break;
4866 case INTEL_BROADCAST_RGB_LIMITED:
4867 intel_dp->color_range_auto = false;
0f2a2a75 4868 intel_dp->limited_color_range = true;
55bc60db
VS
4869 break;
4870 default:
4871 return -EINVAL;
4872 }
ae4edb80
DV
4873
4874 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4875 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4876 return 0;
4877
e953fd7b
CW
4878 goto done;
4879 }
4880
53b41837
YN
4881 if (is_edp(intel_dp) &&
4882 property == connector->dev->mode_config.scaling_mode_property) {
4883 if (val == DRM_MODE_SCALE_NONE) {
4884 DRM_DEBUG_KMS("no scaling not supported\n");
4885 return -EINVAL;
4886 }
4887
4888 if (intel_connector->panel.fitting_mode == val) {
4889 /* the eDP scaling property is not changed */
4890 return 0;
4891 }
4892 intel_connector->panel.fitting_mode = val;
4893
4894 goto done;
4895 }
4896
f684960e
CW
4897 return -EINVAL;
4898
4899done:
c0c36b94
CW
4900 if (intel_encoder->base.crtc)
4901 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4902
4903 return 0;
4904}
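/*
 * Illustrative note (added, not part of the original file): the force-audio,
 * "Broadcast RGB" and eDP scaling properties handled above are set from
 * userspace through the generic connector property interface.  A minimal
 * standalone userspace sketch, kept inside this comment because it is not
 * kernel code; the property name and the value passed in (e.g. 2 for
 * "Limited 16:235") are assumed to match the enums this driver registers:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <xf86drmMode.h>
 *
 *	static int set_connector_prop(int fd, uint32_t connector_id,
 *				      const char *name, uint64_t value)
 *	{
 *		drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
 *		int i, ret = -1;
 *
 *		if (!conn)
 *			return -1;
 *		for (i = 0; i < conn->count_props; i++) {
 *			drmModePropertyPtr prop =
 *				drmModeGetProperty(fd, conn->props[i]);
 *
 *			if (!prop)
 *				continue;
 *			if (strcmp(prop->name, name) == 0)
 *				ret = drmModeConnectorSetProperty(fd,
 *						connector_id, prop->prop_id,
 *						value);
 *			drmModeFreeProperty(prop);
 *		}
 *		drmModeFreeConnector(conn);
 *		return ret;
 *	}
 *
 * e.g. set_connector_prop(fd, id, "Broadcast RGB", 2) requests limited range;
 * intel_dp_set_property() then restores the mode on the crtc (see "done:"
 * above) so the change takes effect.
 */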
4905
a4fc5ed6 4906static void
73845adf 4907intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4908{
1d508706 4909 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4910
10e972d3 4911 kfree(intel_connector->detect_edid);
beb60608 4912
9cd300e0
JN
4913 if (!IS_ERR_OR_NULL(intel_connector->edid))
4914 kfree(intel_connector->edid);
4915
acd8db10
PZ
4916 /* Can't call is_edp() since the encoder may have been destroyed
4917 * already. */
4918 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4919 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4920
a4fc5ed6 4921 drm_connector_cleanup(connector);
55f78c43 4922 kfree(connector);
a4fc5ed6
KP
4923}
4924
00c09d70 4925void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4926{
da63a9f2
PZ
4927 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4928 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4929
4f71d0cb 4930 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4931 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4932 if (is_edp(intel_dp)) {
4933 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4934 /*
4935 * vdd might still be enabled due to the delayed vdd off.
4936 * Make sure vdd is actually turned off here.
4937 */
773538e8 4938 pps_lock(intel_dp);
4be73780 4939 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4940 pps_unlock(intel_dp);
4941
01527b31
CT
4942 if (intel_dp->edp_notifier.notifier_call) {
4943 unregister_reboot_notifier(&intel_dp->edp_notifier);
4944 intel_dp->edp_notifier.notifier_call = NULL;
4945 }
bd943159 4946 }
c8bd0e49 4947 drm_encoder_cleanup(encoder);
da63a9f2 4948 kfree(intel_dig_port);
24d05927
DV
4949}
4950
07f9cd0b
ID
4951static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4952{
4953 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4954
4955 if (!is_edp(intel_dp))
4956 return;
4957
951468f3
VS
4958 /*
4959 * vdd might still be enabled due to the delayed vdd off.
4960 * Make sure vdd is actually turned off here.
4961 */
afa4e53a 4962 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4963 pps_lock(intel_dp);
07f9cd0b 4964 edp_panel_vdd_off_sync(intel_dp);
773538e8 4965 pps_unlock(intel_dp);
07f9cd0b
ID
4966}
4967
49e6bc51
VS
4968static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4969{
4970 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4971 struct drm_device *dev = intel_dig_port->base.base.dev;
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4973 enum intel_display_power_domain power_domain;
4974
4975 lockdep_assert_held(&dev_priv->pps_mutex);
4976
4977 if (!edp_have_panel_vdd(intel_dp))
4978 return;
4979
4980 /*
4981 * The VDD bit needs a power domain reference, so if the bit is
4982 * already enabled when we boot or resume, grab this reference and
4983 * schedule a vdd off, so we don't hold on to the reference
4984 * indefinitely.
4985 */
4986 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4987 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4988 intel_display_power_get(dev_priv, power_domain);
4989
4990 edp_panel_vdd_schedule_off(intel_dp);
4991}
4992
6d93c0c4
ID
4993static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4994{
49e6bc51
VS
4995 struct intel_dp *intel_dp;
4996
4997 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4998 return;
4999
5000 intel_dp = enc_to_intel_dp(encoder);
5001
5002 pps_lock(intel_dp);
5003
5004 /*
5005 * Read out the current power sequencer assignment,
5006 * in case the BIOS did something with it.
5007 */
5008 if (IS_VALLEYVIEW(encoder->dev))
5009 vlv_initial_power_sequencer_setup(intel_dp);
5010
5011 intel_edp_panel_vdd_sanitize(intel_dp);
5012
5013 pps_unlock(intel_dp);
6d93c0c4
ID
5014}
5015
a4fc5ed6 5016static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 5017 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 5018 .detect = intel_dp_detect,
beb60608 5019 .force = intel_dp_force,
a4fc5ed6 5020 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 5021 .set_property = intel_dp_set_property,
2545e4a6 5022 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 5023 .destroy = intel_dp_connector_destroy,
c6f95f27 5024 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 5025 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
5026};
5027
5028static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5029 .get_modes = intel_dp_get_modes,
5030 .mode_valid = intel_dp_mode_valid,
df0e9248 5031 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
5032};
5033
a4fc5ed6 5034static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 5035 .reset = intel_dp_encoder_reset,
24d05927 5036 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
5037};
5038
b2c5c181 5039enum irqreturn
13cf5504
DA
5040intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5041{
5042 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 5043 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
5044 struct drm_device *dev = intel_dig_port->base.base.dev;
5045 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 5046 enum intel_display_power_domain power_domain;
b2c5c181 5047 enum irqreturn ret = IRQ_NONE;
1c767b33 5048
0e32b39c
DA
5049 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5050 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5051
7a7f84cc
VS
5052 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5053 /*
5054 * vdd off can generate a long pulse on eDP which
5055 * would require vdd on to handle it, and thus we
5056 * would end up in an endless cycle of
5057 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5058 */
5059 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5060 port_name(intel_dig_port->port));
a8b3d52f 5061 return IRQ_HANDLED;
7a7f84cc
VS
5062 }
5063
26fbb774
VS
5064 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5065 port_name(intel_dig_port->port),
0e32b39c 5066 long_hpd ? "long" : "short");
13cf5504 5067
1c767b33
ID
5068 power_domain = intel_display_port_power_domain(intel_encoder);
5069 intel_display_power_get(dev_priv, power_domain);
5070
0e32b39c 5071 if (long_hpd) {
5fa836a9
MK
5072 /* indicate that we need to restart link training */
5073 intel_dp->train_set_valid = false;
2a592bec 5074
7e66bcf2
JN
5075 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5076 goto mst_fail;
0e32b39c
DA
5077
5078 if (!intel_dp_get_dpcd(intel_dp)) {
5079 goto mst_fail;
5080 }
5081
5082 intel_dp_probe_oui(intel_dp);
5083
5084 if (!intel_dp_probe_mst(intel_dp))
5085 goto mst_fail;
5086
5087 } else {
5088 if (intel_dp->is_mst) {
1c767b33 5089 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5090 goto mst_fail;
5091 }
5092
5093 if (!intel_dp->is_mst) {
5094 /*
5095 * we'll check the link status via the normal hot plug path later -
5096 * but for short hpds we should check it now
5097 */
5b215bcf 5098 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5099 intel_dp_check_link_status(intel_dp);
5b215bcf 5100 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5101 }
5102 }
b2c5c181
DV
5103
5104 ret = IRQ_HANDLED;
5105
1c767b33 5106 goto put_power;
0e32b39c
DA
5107mst_fail:
5108 /* if we were in MST mode, and the device is not there, get out of MST mode */
5109 if (intel_dp->is_mst) {
5110 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5111 intel_dp->is_mst = false;
5112 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5113 }
1c767b33
ID
5114put_power:
5115 intel_display_power_put(dev_priv, power_domain);
5116
5117 return ret;
13cf5504
DA
5118}
5119
e3421a18
ZW
5120/* Return which DP Port should be selected for Transcoder DP control */
5121int
0206e353 5122intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5123{
5124 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5125 struct intel_encoder *intel_encoder;
5126 struct intel_dp *intel_dp;
e3421a18 5127
fa90ecef
PZ
5128 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5129 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5130
fa90ecef
PZ
5131 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5132 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5133 return intel_dp->output_reg;
e3421a18 5134 }
ea5b213a 5135
e3421a18
ZW
5136 return -1;
5137}
5138
36e83a18 5139/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5140bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5141{
5142 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5143 union child_device_config *p_child;
36e83a18 5144 int i;
5d8a7752
VS
5145 static const short port_mapping[] = {
5146 [PORT_B] = PORT_IDPB,
5147 [PORT_C] = PORT_IDPC,
5148 [PORT_D] = PORT_IDPD,
5149 };
36e83a18 5150
3b32a35b
VS
5151 if (port == PORT_A)
5152 return true;
5153
41aa3448 5154 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5155 return false;
5156
41aa3448
RV
5157 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5158 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5159
5d8a7752 5160 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5161 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5162 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5163 return true;
5164 }
5165 return false;
5166}
5167
0e32b39c 5168void
f684960e
CW
5169intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5170{
53b41837
YN
5171 struct intel_connector *intel_connector = to_intel_connector(connector);
5172
3f43c48d 5173 intel_attach_force_audio_property(connector);
e953fd7b 5174 intel_attach_broadcast_rgb_property(connector);
55bc60db 5175 intel_dp->color_range_auto = true;
53b41837
YN
5176
5177 if (is_edp(intel_dp)) {
5178 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5179 drm_object_attach_property(
5180 &connector->base,
53b41837 5181 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5182 DRM_MODE_SCALE_ASPECT);
5183 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5184 }
f684960e
CW
5185}
5186
dada1a9f
ID
5187static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5188{
5189 intel_dp->last_power_cycle = jiffies;
5190 intel_dp->last_power_on = jiffies;
5191 intel_dp->last_backlight_off = jiffies;
5192}
5193
67a54566
DV
5194static void
5195intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5196 struct intel_dp *intel_dp)
67a54566
DV
5197{
5198 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5199 struct edp_power_seq cur, vbt, spec,
5200 *final = &intel_dp->pps_delays;
b0a08bec
VK
5201 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5202 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5203
e39b999a
VS
5204 lockdep_assert_held(&dev_priv->pps_mutex);
5205
81ddbc69
VS
5206 /* already initialized? */
5207 if (final->t11_t12 != 0)
5208 return;
5209
b0a08bec
VK
5210 if (IS_BROXTON(dev)) {
5211 /*
5212 * TODO: BXT has 2 sets of PPS registers.
5213 * The correct register for Broxton needs to be identified
5214 * using VBT; hardcoding for now.
5215 */
5216 pp_ctrl_reg = BXT_PP_CONTROL(0);
5217 pp_on_reg = BXT_PP_ON_DELAYS(0);
5218 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5219 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5220 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5221 pp_on_reg = PCH_PP_ON_DELAYS;
5222 pp_off_reg = PCH_PP_OFF_DELAYS;
5223 pp_div_reg = PCH_PP_DIVISOR;
5224 } else {
bf13e81b
JN
5225 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5226
5227 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5228 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5229 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5230 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5231 }
67a54566
DV
5232
5233 /* Workaround: Need to write PP_CONTROL with the unlock key as
5234 * the very first thing. */
b0a08bec 5235 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5236
453c5420
JB
5237 pp_on = I915_READ(pp_on_reg);
5238 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5239 if (!IS_BROXTON(dev)) {
5240 I915_WRITE(pp_ctrl_reg, pp_ctl);
5241 pp_div = I915_READ(pp_div_reg);
5242 }
67a54566
DV
5243
5244 /* Pull timing values out of registers */
5245 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5246 PANEL_POWER_UP_DELAY_SHIFT;
5247
5248 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5249 PANEL_LIGHT_ON_DELAY_SHIFT;
5250
5251 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5252 PANEL_LIGHT_OFF_DELAY_SHIFT;
5253
5254 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5255 PANEL_POWER_DOWN_DELAY_SHIFT;
5256
b0a08bec
VK
5257 if (IS_BROXTON(dev)) {
5258 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5259 BXT_POWER_CYCLE_DELAY_SHIFT;
5260 if (tmp > 0)
5261 cur.t11_t12 = (tmp - 1) * 1000;
5262 else
5263 cur.t11_t12 = 0;
5264 } else {
5265 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5266 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5267 }
67a54566
DV
5268
5269 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5270 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5271
41aa3448 5272 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5273
5274 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5275 * our hw here, which are all in 100usec. */
5276 spec.t1_t3 = 210 * 10;
5277 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5278 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5279 spec.t10 = 500 * 10;
5280 /* This one is special and actually in units of 100ms, but zero
5281 * based in the hw (so we need to add 100 ms). But the sw vbt
5282 * table multiplies it by 1000 to make it in units of 100usec,
5283 * too. */
5284 spec.t11_t12 = (510 + 100) * 10;
5285
5286 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5287 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5288
5289 /* Use the max of the register settings and vbt. If both are
5290 * unset, fall back to the spec limits. */
36b5f425 5291#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5292 spec.field : \
5293 max(cur.field, vbt.field))
5294 assign_final(t1_t3);
5295 assign_final(t8);
5296 assign_final(t9);
5297 assign_final(t10);
5298 assign_final(t11_t12);
5299#undef assign_final
5300
36b5f425 5301#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5302 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5303 intel_dp->backlight_on_delay = get_delay(t8);
5304 intel_dp->backlight_off_delay = get_delay(t9);
5305 intel_dp->panel_power_down_delay = get_delay(t10);
5306 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5307#undef get_delay
5308
f30d26e4
JN
5309 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5310 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5311 intel_dp->panel_power_cycle_delay);
5312
5313 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5314 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5315}
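/*
 * Worked example (added note; the numbers follow from the code above): the
 * delays are kept in the hardware's 100 usec units until the final
 * conversion.  If both the register values and the VBT are zero, t1_t3 falls
 * back to the spec limit of 210 * 10 = 2100 (i.e. 210 ms) and t11_t12 to
 * (510 + 100) * 10 = 6100 (i.e. 610 ms), so get_delay() yields
 * panel_power_up_delay = DIV_ROUND_UP(2100, 10) = 210 ms and
 * panel_power_cycle_delay = DIV_ROUND_UP(6100, 10) = 610 ms.
 */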
5316
5317static void
5318intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5319 struct intel_dp *intel_dp)
f30d26e4
JN
5320{
5321 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5322 u32 pp_on, pp_off, pp_div, port_sel = 0;
5323 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5324 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5325 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5326 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5327
e39b999a 5328 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5329
b0a08bec
VK
5330 if (IS_BROXTON(dev)) {
5331 /*
5332 * TODO: BXT has 2 sets of PPS registers.
5333 * The correct register for Broxton needs to be identified
5334 * using VBT; hardcoding for now.
5335 */
5336 pp_ctrl_reg = BXT_PP_CONTROL(0);
5337 pp_on_reg = BXT_PP_ON_DELAYS(0);
5338 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5339
5340 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5341 pp_on_reg = PCH_PP_ON_DELAYS;
5342 pp_off_reg = PCH_PP_OFF_DELAYS;
5343 pp_div_reg = PCH_PP_DIVISOR;
5344 } else {
bf13e81b
JN
5345 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5346
5347 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5348 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5349 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5350 }
5351
b2f19d1a
PZ
5352 /*
5353 * And finally store the new values in the power sequencer. The
5354 * backlight delays are set to 1 because we do manual waits on them. For
5355 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5356 * we'll end up waiting for the backlight off delay twice: once when we
5357 * do the manual sleep, and once when we disable the panel and wait for
5358 * the PP_STATUS bit to become zero.
5359 */
f30d26e4 5360 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5361 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5362 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5363 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5364 /* Compute the divisor for the pp clock, simply match the Bspec
5365 * formula. */
b0a08bec
VK
5366 if (IS_BROXTON(dev)) {
5367 pp_div = I915_READ(pp_ctrl_reg);
5368 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5369 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5370 << BXT_POWER_CYCLE_DELAY_SHIFT);
5371 } else {
5372 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5373 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5374 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5375 }
67a54566
DV
5376
5377 /* Haswell doesn't have any port selection bits for the panel
5378 * power sequencer any more. */
bc7d38a4 5379 if (IS_VALLEYVIEW(dev)) {
ad933b56 5380 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5381 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5382 if (port == PORT_A)
a24c144c 5383 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5384 else
a24c144c 5385 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5386 }
5387
453c5420
JB
5388 pp_on |= port_sel;
5389
5390 I915_WRITE(pp_on_reg, pp_on);
5391 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5392 if (IS_BROXTON(dev))
5393 I915_WRITE(pp_ctrl_reg, pp_div);
5394 else
5395 I915_WRITE(pp_div_reg, pp_div);
67a54566 5396
67a54566 5397 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5398 I915_READ(pp_on_reg),
5399 I915_READ(pp_off_reg),
b0a08bec
VK
5400 IS_BROXTON(dev) ?
5401 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5402 I915_READ(pp_div_reg));
f684960e
CW
5403}
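/*
 * Worked example (added note): on the non-Broxton path above, assuming a
 * 125 MHz rawclk (div = 125, a typical PCH value), the reference divider
 * field becomes (100 * 125) / 2 - 1 = 6249, and a t11_t12 of 6100 (in
 * 100 usec units) is programmed as DIV_ROUND_UP(6100, 1000) = 7 in the
 * PANEL_POWER_CYCLE_DELAY field.  On Broxton the same value becomes
 * DIV_ROUND_UP(6100 + 1, 1000) = 7 in BXT_POWER_CYCLE_DELAY.
 */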
5404
b33a2815
VK
5405/**
5406 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5407 * @dev: DRM device
5408 * @refresh_rate: RR to be programmed
5409 *
5410 * This function gets called when refresh rate (RR) has to be changed from
5411 * one frequency to another. Switches can be between high and low RR
5412 * supported by the panel or to any other RR based on media playback (in
5413 * this case, RR value needs to be passed from user space).
5414 *
5415 * The caller of this function needs to take a lock on dev_priv->drrs.
5416 */
96178eeb 5417static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5418{
5419 struct drm_i915_private *dev_priv = dev->dev_private;
5420 struct intel_encoder *encoder;
96178eeb
VK
5421 struct intel_digital_port *dig_port = NULL;
5422 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5423 struct intel_crtc_state *config = NULL;
439d7ac0 5424 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5425 u32 reg, val;
96178eeb 5426 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5427
5428 if (refresh_rate <= 0) {
5429 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5430 return;
5431 }
5432
96178eeb
VK
5433 if (intel_dp == NULL) {
5434 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5435 return;
5436 }
5437
1fcc9d1c 5438 /*
e4d59f6b
RV
5439 * FIXME: This needs proper synchronization with psr state for some
5440 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5441 */
439d7ac0 5442
96178eeb
VK
5443 dig_port = dp_to_dig_port(intel_dp);
5444 encoder = &dig_port->base;
723f9aab 5445 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5446
5447 if (!intel_crtc) {
5448 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5449 return;
5450 }
5451
6e3c9717 5452 config = intel_crtc->config;
439d7ac0 5453
96178eeb 5454 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5455 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5456 return;
5457 }
5458
96178eeb
VK
5459 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5460 refresh_rate)
439d7ac0
PB
5461 index = DRRS_LOW_RR;
5462
96178eeb 5463 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5464 DRM_DEBUG_KMS(
5465 "DRRS requested for previously set RR...ignoring\n");
5466 return;
5467 }
5468
5469 if (!intel_crtc->active) {
5470 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5471 return;
5472 }
5473
44395bfe 5474 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5475 switch (index) {
5476 case DRRS_HIGH_RR:
5477 intel_dp_set_m_n(intel_crtc, M1_N1);
5478 break;
5479 case DRRS_LOW_RR:
5480 intel_dp_set_m_n(intel_crtc, M2_N2);
5481 break;
5482 case DRRS_MAX_RR:
5483 default:
5484 DRM_ERROR("Unsupported refreshrate type\n");
5485 }
5486 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5487 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5488 val = I915_READ(reg);
a4c30b1d 5489
439d7ac0 5490 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5491 if (IS_VALLEYVIEW(dev))
5492 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5493 else
5494 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5495 } else {
6fa7aec1
VK
5496 if (IS_VALLEYVIEW(dev))
5497 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5498 else
5499 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5500 }
5501 I915_WRITE(reg, val);
5502 }
5503
4e9ac947
VK
5504 dev_priv->drrs.refresh_rate_type = index;
5505
5506 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5507}
5508
b33a2815
VK
5509/**
5510 * intel_edp_drrs_enable - init drrs struct if supported
5511 * @intel_dp: DP struct
5512 *
5513 * Initializes frontbuffer_bits and drrs.dp
5514 */
c395578e
VK
5515void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5516{
5517 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5518 struct drm_i915_private *dev_priv = dev->dev_private;
5519 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5520 struct drm_crtc *crtc = dig_port->base.base.crtc;
5521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5522
5523 if (!intel_crtc->config->has_drrs) {
5524 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5525 return;
5526 }
5527
5528 mutex_lock(&dev_priv->drrs.mutex);
5529 if (WARN_ON(dev_priv->drrs.dp)) {
5530 DRM_ERROR("DRRS already enabled\n");
5531 goto unlock;
5532 }
5533
5534 dev_priv->drrs.busy_frontbuffer_bits = 0;
5535
5536 dev_priv->drrs.dp = intel_dp;
5537
5538unlock:
5539 mutex_unlock(&dev_priv->drrs.mutex);
5540}
5541
b33a2815
VK
5542/**
5543 * intel_edp_drrs_disable - Disable DRRS
5544 * @intel_dp: DP struct
5545 *
5546 */
c395578e
VK
5547void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5548{
5549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5550 struct drm_i915_private *dev_priv = dev->dev_private;
5551 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5552 struct drm_crtc *crtc = dig_port->base.base.crtc;
5553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5554
5555 if (!intel_crtc->config->has_drrs)
5556 return;
5557
5558 mutex_lock(&dev_priv->drrs.mutex);
5559 if (!dev_priv->drrs.dp) {
5560 mutex_unlock(&dev_priv->drrs.mutex);
5561 return;
5562 }
5563
5564 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5565 intel_dp_set_drrs_state(dev_priv->dev,
5566 intel_dp->attached_connector->panel.
5567 fixed_mode->vrefresh);
5568
5569 dev_priv->drrs.dp = NULL;
5570 mutex_unlock(&dev_priv->drrs.mutex);
5571
5572 cancel_delayed_work_sync(&dev_priv->drrs.work);
5573}
5574
4e9ac947
VK
5575static void intel_edp_drrs_downclock_work(struct work_struct *work)
5576{
5577 struct drm_i915_private *dev_priv =
5578 container_of(work, typeof(*dev_priv), drrs.work.work);
5579 struct intel_dp *intel_dp;
5580
5581 mutex_lock(&dev_priv->drrs.mutex);
5582
5583 intel_dp = dev_priv->drrs.dp;
5584
5585 if (!intel_dp)
5586 goto unlock;
5587
439d7ac0 5588 /*
4e9ac947
VK
5589 * The delayed work can race with an invalidate hence we need to
5590 * recheck.
439d7ac0
PB
5591 */
5592
4e9ac947
VK
5593 if (dev_priv->drrs.busy_frontbuffer_bits)
5594 goto unlock;
439d7ac0 5595
4e9ac947
VK
5596 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5597 intel_dp_set_drrs_state(dev_priv->dev,
5598 intel_dp->attached_connector->panel.
5599 downclock_mode->vrefresh);
439d7ac0 5600
4e9ac947 5601unlock:
4e9ac947 5602 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5603}
5604
b33a2815 5605/**
0ddfd203 5606 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5607 * @dev: DRM device
5608 * @frontbuffer_bits: frontbuffer plane tracking bits
5609 *
0ddfd203
R
5610 * This function gets called every time rendering on the given planes starts.
5611 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5612 *
5613 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5614 */
a93fad0f
VK
5615void intel_edp_drrs_invalidate(struct drm_device *dev,
5616 unsigned frontbuffer_bits)
5617{
5618 struct drm_i915_private *dev_priv = dev->dev_private;
5619 struct drm_crtc *crtc;
5620 enum pipe pipe;
5621
9da7d693 5622 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5623 return;
5624
88f933a8 5625 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5626
a93fad0f 5627 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5628 if (!dev_priv->drrs.dp) {
5629 mutex_unlock(&dev_priv->drrs.mutex);
5630 return;
5631 }
5632
a93fad0f
VK
5633 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5634 pipe = to_intel_crtc(crtc)->pipe;
5635
c1d038c6
DV
5636 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5637 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5638
0ddfd203 5639 /* invalidate means busy screen hence upclock */
c1d038c6 5640 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5641 intel_dp_set_drrs_state(dev_priv->dev,
5642 dev_priv->drrs.dp->attached_connector->panel.
5643 fixed_mode->vrefresh);
a93fad0f 5644
a93fad0f
VK
5645 mutex_unlock(&dev_priv->drrs.mutex);
5646}
5647
b33a2815 5648/**
0ddfd203 5649 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5650 * @dev: DRM device
5651 * @frontbuffer_bits: frontbuffer plane tracking bits
5652 *
0ddfd203
R
5653 * This function gets called every time rendering on the given planes has
5654 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5655 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
5656 * if no other planes are dirty.
b33a2815
VK
5657 *
5658 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5659 */
a93fad0f
VK
5660void intel_edp_drrs_flush(struct drm_device *dev,
5661 unsigned frontbuffer_bits)
5662{
5663 struct drm_i915_private *dev_priv = dev->dev_private;
5664 struct drm_crtc *crtc;
5665 enum pipe pipe;
5666
9da7d693 5667 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5668 return;
5669
88f933a8 5670 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5671
a93fad0f 5672 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5673 if (!dev_priv->drrs.dp) {
5674 mutex_unlock(&dev_priv->drrs.mutex);
5675 return;
5676 }
5677
a93fad0f
VK
5678 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5679 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5680
5681 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5682 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5683
0ddfd203 5684 /* flush means busy screen hence upclock */
c1d038c6 5685 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5686 intel_dp_set_drrs_state(dev_priv->dev,
5687 dev_priv->drrs.dp->attached_connector->panel.
5688 fixed_mode->vrefresh);
5689
5690 /*
5691 * flush also means no more activity hence schedule downclock, if all
5692 * other fbs are quiescent too
5693 */
5694 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5695 schedule_delayed_work(&dev_priv->drrs.work,
5696 msecs_to_jiffies(1000));
5697 mutex_unlock(&dev_priv->drrs.mutex);
5698}
5699
b33a2815
VK
5700/**
5701 * DOC: Display Refresh Rate Switching (DRRS)
5702 *
5703 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5704 * which enables switching between low and high refresh rates,
5705 * dynamically, based on the usage scenario. This feature is applicable
5706 * for internal panels.
5707 *
5708 * Indication that the panel supports DRRS is given by the panel EDID, which
5709 * would list multiple refresh rates for one resolution.
5710 *
5711 * DRRS is of 2 types - static and seamless.
5712 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5713 * (may appear as a blink on screen) and is used in dock-undock scenario.
5714 * Seamless DRRS involves changing RR without any visual effect to the user
5715 * and can be used during normal system usage. This is done by programming
5716 * certain registers.
5717 *
5718 * Support for static/seamless DRRS may be indicated in the VBT based on
5719 * inputs from the panel spec.
5720 *
5721 * DRRS saves power by switching to low RR based on usage scenarios.
5722 *
5723 * eDP DRRS:-
5724 * The implementation is based on frontbuffer tracking.
5725 * When there is a disturbance on the screen triggered by user activity or a
5726 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5727 * When there is no movement on screen, after a timeout of 1 second, a switch
5728 * to low RR is made.
5729 * For integration with frontbuffer tracking code,
5730 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5731 *
5732 * DRRS can be further extended to support other internal panels and also
5733 * the scenario of video playback wherein RR is set based on the rate
5734 * requested by userspace.
5735 */
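/*
 * Illustrative call pattern (added note, not part of the original file):
 * from the description above, a frontbuffer writer is expected to bracket
 * its activity roughly like the hypothetical helper below.  The real callers
 * live in i915's frontbuffer tracking code; only the two exported helpers
 * used here are taken from this file.
 *
 *	static void example_frontbuffer_write(struct drm_device *dev,
 *					      unsigned frontbuffer_bits)
 *	{
 *		// screen is about to change: upclock (LOW_RR -> HIGH_RR)
 *		intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *		// ... rendering / flipping happens here ...
 *
 *		// done: flush keeps the high RR and, once all tracked bits
 *		// are quiescent, re-arms the 1 second idle downclock timer
 *		intel_edp_drrs_flush(dev, frontbuffer_bits);
 *	}
 */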
5736
5737/**
5738 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5739 * @intel_connector: eDP connector
5740 * @fixed_mode: preferred mode of panel
5741 *
5742 * This function is called only once at driver load to initialize basic
5743 * DRRS stuff.
5744 *
5745 * Returns:
5746 * Downclock mode if panel supports it, else return NULL.
5747 * DRRS support is determined by the presence of downclock mode (apart
5748 * from VBT setting).
5749 */
4f9db5b5 5750static struct drm_display_mode *
96178eeb
VK
5751intel_dp_drrs_init(struct intel_connector *intel_connector,
5752 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5753{
5754 struct drm_connector *connector = &intel_connector->base;
96178eeb 5755 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5756 struct drm_i915_private *dev_priv = dev->dev_private;
5757 struct drm_display_mode *downclock_mode = NULL;
5758
9da7d693
DV
5759 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5760 mutex_init(&dev_priv->drrs.mutex);
5761
4f9db5b5
PB
5762 if (INTEL_INFO(dev)->gen <= 6) {
5763 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5764 return NULL;
5765 }
5766
5767 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5768 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5769 return NULL;
5770 }
5771
5772 downclock_mode = intel_find_panel_downclock
5773 (dev, fixed_mode, connector);
5774
5775 if (!downclock_mode) {
a1d26342 5776 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5777 return NULL;
5778 }
5779
96178eeb 5780 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5781
96178eeb 5782 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5783 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5784 return downclock_mode;
5785}
5786
ed92f0b2 5787static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5788 struct intel_connector *intel_connector)
ed92f0b2
PZ
5789{
5790 struct drm_connector *connector = &intel_connector->base;
5791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5792 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5793 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5794 struct drm_i915_private *dev_priv = dev->dev_private;
5795 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5796 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5797 bool has_dpcd;
5798 struct drm_display_mode *scan;
5799 struct edid *edid;
6517d273 5800 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5801
5802 if (!is_edp(intel_dp))
5803 return true;
5804
49e6bc51
VS
5805 pps_lock(intel_dp);
5806 intel_edp_panel_vdd_sanitize(intel_dp);
5807 pps_unlock(intel_dp);
63635217 5808
ed92f0b2 5809 /* Cache DPCD and EDID for edp. */
ed92f0b2 5810 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5811
5812 if (has_dpcd) {
5813 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5814 dev_priv->no_aux_handshake =
5815 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5816 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5817 } else {
5818 /* if this fails, presume the device is a ghost */
5819 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5820 return false;
5821 }
5822
5823 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5824 pps_lock(intel_dp);
36b5f425 5825 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5826 pps_unlock(intel_dp);
ed92f0b2 5827
060c8778 5828 mutex_lock(&dev->mode_config.mutex);
0b99836f 5829 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5830 if (edid) {
5831 if (drm_add_edid_modes(connector, edid)) {
5832 drm_mode_connector_update_edid_property(connector,
5833 edid);
5834 drm_edid_to_eld(connector, edid);
5835 } else {
5836 kfree(edid);
5837 edid = ERR_PTR(-EINVAL);
5838 }
5839 } else {
5840 edid = ERR_PTR(-ENOENT);
5841 }
5842 intel_connector->edid = edid;
5843
5844 /* prefer fixed mode from EDID if available */
5845 list_for_each_entry(scan, &connector->probed_modes, head) {
5846 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5847 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5848 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5849 intel_connector, fixed_mode);
ed92f0b2
PZ
5850 break;
5851 }
5852 }
5853
5854 /* fallback to VBT if available for eDP */
5855 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5856 fixed_mode = drm_mode_duplicate(dev,
5857 dev_priv->vbt.lfp_lvds_vbt_mode);
5858 if (fixed_mode)
5859 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5860 }
060c8778 5861 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5862
01527b31
CT
5863 if (IS_VALLEYVIEW(dev)) {
5864 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5865 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5866
5867 /*
5868 * Figure out the current pipe for the initial backlight setup.
5869 * If the current pipe isn't valid, try the PPS pipe, and if that
5870 * fails just assume pipe A.
5871 */
5872 if (IS_CHERRYVIEW(dev))
5873 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5874 else
5875 pipe = PORT_TO_PIPE(intel_dp->DP);
5876
5877 if (pipe != PIPE_A && pipe != PIPE_B)
5878 pipe = intel_dp->pps_pipe;
5879
5880 if (pipe != PIPE_A && pipe != PIPE_B)
5881 pipe = PIPE_A;
5882
5883 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5884 pipe_name(pipe));
01527b31
CT
5885 }
5886
4f9db5b5 5887 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5888 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5889 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5890
5891 return true;
5892}
5893
16c25533 5894bool
f0fec3f2
PZ
5895intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5896 struct intel_connector *intel_connector)
a4fc5ed6 5897{
f0fec3f2
PZ
5898 struct drm_connector *connector = &intel_connector->base;
5899 struct intel_dp *intel_dp = &intel_dig_port->dp;
5900 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5901 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5902 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5903 enum port port = intel_dig_port->port;
0b99836f 5904 int type;
a4fc5ed6 5905
a4a5d2f8
VS
5906 intel_dp->pps_pipe = INVALID_PIPE;
5907
ec5b01dd 5908 /* intel_dp vfuncs */
b6b5e383
DL
5909 if (INTEL_INFO(dev)->gen >= 9)
5910 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5911 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5912 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5913 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5914 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5915 else if (HAS_PCH_SPLIT(dev))
5916 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5917 else
5918 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5919
b9ca5fad
DL
5920 if (INTEL_INFO(dev)->gen >= 9)
5921 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5922 else
5923 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5924
0767935e
DV
5925 /* Preserve the current hw state. */
5926 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5927 intel_dp->attached_connector = intel_connector;
3d3dc149 5928
3b32a35b 5929 if (intel_dp_is_edp(dev, port))
b329530c 5930 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5931 else
5932 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5933
f7d24902
ID
5934 /*
5935 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5936 * for DP the encoder type can be set by the caller to
5937 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5938 */
5939 if (type == DRM_MODE_CONNECTOR_eDP)
5940 intel_encoder->type = INTEL_OUTPUT_EDP;
5941
c17ed5b5
VS
5942 /* eDP only on port B and/or C on vlv/chv */
5943 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5944 port != PORT_B && port != PORT_C))
5945 return false;
5946
e7281eab
ID
5947 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5948 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5949 port_name(port));
5950
b329530c 5951 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5952 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5953
a4fc5ed6
KP
5954 connector->interlace_allowed = true;
5955 connector->doublescan_allowed = 0;
5956
f0fec3f2 5957 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5958 edp_panel_vdd_work);
a4fc5ed6 5959
df0e9248 5960 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5961 drm_connector_register(connector);
a4fc5ed6 5962
affa9354 5963 if (HAS_DDI(dev))
bcbc889b
PZ
5964 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5965 else
5966 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5967 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5968
0b99836f 5969 /* Set up the hotplug pin. */
ab9d7c30
PZ
5970 switch (port) {
5971 case PORT_A:
1d843f9d 5972 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5973 break;
5974 case PORT_B:
1d843f9d 5975 intel_encoder->hpd_pin = HPD_PORT_B;
cf1d5883
SJ
5976 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
5977 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5978 break;
5979 case PORT_C:
1d843f9d 5980 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5981 break;
5982 case PORT_D:
1d843f9d 5983 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5984 break;
5985 default:
ad1c0b19 5986 BUG();
5eb08b69
ZW
5987 }
5988
dada1a9f 5989 if (is_edp(intel_dp)) {
773538e8 5990 pps_lock(intel_dp);
1e74a324
VS
5991 intel_dp_init_panel_power_timestamps(intel_dp);
5992 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5993 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5994 else
36b5f425 5995 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5996 pps_unlock(intel_dp);
dada1a9f 5997 }
0095e6dc 5998
9d1a1031 5999 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 6000
0e32b39c 6001 /* init MST on ports that can support it */
0c9b3715
JN
6002 if (HAS_DP_MST(dev) &&
6003 (port == PORT_B || port == PORT_C || port == PORT_D))
6004 intel_dp_mst_encoder_init(intel_dig_port,
6005 intel_connector->base.base.id);
0e32b39c 6006
36b5f425 6007 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 6008 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
6009 if (is_edp(intel_dp)) {
6010 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
6011 /*
6012 * vdd might still be enabled due to the delayed vdd off.
6013 * Make sure vdd is actually turned off here.
6014 */
773538e8 6015 pps_lock(intel_dp);
4be73780 6016 edp_panel_vdd_off_sync(intel_dp);
773538e8 6017 pps_unlock(intel_dp);
15b1d171 6018 }
34ea3d38 6019 drm_connector_unregister(connector);
b2f246a8 6020 drm_connector_cleanup(connector);
16c25533 6021 return false;
b2f246a8 6022 }
32f9d658 6023
f684960e
CW
6024 intel_dp_add_properties(intel_dp, connector);
6025
a4fc5ed6
KP
6026 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6027 * 0xd. Failure to do so will result in spurious interrupts being
6028 * generated on the port when a cable is not attached.
6029 */
6030 if (IS_G4X(dev) && !IS_GM45(dev)) {
6031 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6032 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6033 }
16c25533 6034
aa7471d2
JN
6035 i915_debugfs_connector_add(connector);
6036
16c25533 6037 return true;
a4fc5ed6 6038}
f0fec3f2
PZ
6039
6040void
6041intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6042{
13cf5504 6043 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6044 struct intel_digital_port *intel_dig_port;
6045 struct intel_encoder *intel_encoder;
6046 struct drm_encoder *encoder;
6047 struct intel_connector *intel_connector;
6048
b14c5679 6049 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6050 if (!intel_dig_port)
6051 return;
6052
08d9bc92 6053 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
6054 if (!intel_connector) {
6055 kfree(intel_dig_port);
6056 return;
6057 }
6058
6059 intel_encoder = &intel_dig_port->base;
6060 encoder = &intel_encoder->base;
6061
6062 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6063 DRM_MODE_ENCODER_TMDS);
6064
5bfe2ac0 6065 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6066 intel_encoder->disable = intel_disable_dp;
00c09d70 6067 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6068 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6069 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6070 if (IS_CHERRYVIEW(dev)) {
9197c88b 6071 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6072 intel_encoder->pre_enable = chv_pre_enable_dp;
6073 intel_encoder->enable = vlv_enable_dp;
580d3811 6074 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6075 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6076 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6077 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6078 intel_encoder->pre_enable = vlv_pre_enable_dp;
6079 intel_encoder->enable = vlv_enable_dp;
49277c31 6080 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6081 } else {
ecff4f3b
JN
6082 intel_encoder->pre_enable = g4x_pre_enable_dp;
6083 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6084 if (INTEL_INFO(dev)->gen >= 5)
6085 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6086 }
f0fec3f2 6087
174edf1f 6088 intel_dig_port->port = port;
f0fec3f2
PZ
6089 intel_dig_port->dp.output_reg = output_reg;
6090
00c09d70 6091 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6092 if (IS_CHERRYVIEW(dev)) {
6093 if (port == PORT_D)
6094 intel_encoder->crtc_mask = 1 << 2;
6095 else
6096 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6097 } else {
6098 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6099 }
bc079e8b 6100 intel_encoder->cloneable = 0;
f0fec3f2 6101
13cf5504 6102 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6103 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6104
15b1d171
PZ
6105 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6106 drm_encoder_cleanup(encoder);
6107 kfree(intel_dig_port);
b2f246a8 6108 kfree(intel_connector);
15b1d171 6109 }
f0fec3f2 6110}
0e32b39c
DA
6111
6112void intel_dp_mst_suspend(struct drm_device *dev)
6113{
6114 struct drm_i915_private *dev_priv = dev->dev_private;
6115 int i;
6116
6117 /* disable MST */
6118 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6119 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6120 if (!intel_dig_port)
6121 continue;
6122
6123 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6124 if (!intel_dig_port->dp.can_mst)
6125 continue;
6126 if (intel_dig_port->dp.is_mst)
6127 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6128 }
6129 }
6130}
6131
6132void intel_dp_mst_resume(struct drm_device *dev)
6133{
6134 struct drm_i915_private *dev_priv = dev->dev_private;
6135 int i;
6136
6137 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6138 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6139 if (!intel_dig_port)
6140 continue;
6141 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6142 int ret;
6143
6144 if (!intel_dig_port->dp.can_mst)
6145 continue;
6146
6147 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6148 if (ret != 0) {
6149 intel_dp_check_mst_status(&intel_dig_port->dp);
6150 }
6151 }
6152 }
6153}