drm/i915/bxt: edp1.4 Intermediate Freq support
linux-2.6-block.git: drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
 29#include <linux/slab.h>
 30#include <linux/export.h>
 31#include <linux/notifier.h>
 32#include <linux/reboot.h>
 33#include <drm/drmP.h>
 34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
 38#include "intel_drv.h"
 39#include <drm/i915_drm.h>
 40#include "i915_drv.h"
 41
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
 76/*
 77 * CHV supports eDP 1.4, which allows additional link rates.
 78 * Only the fixed rates are listed below; variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
 82 * CHV requires programming fractional division for m2.
 83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
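/*
 * Illustrative check of the fixed-point m2 values above:
 * 0x819999a == (32 << 22) | 1677722, i.e. m2_int = 32, m2_fraction = 1677722;
 * 0x6c00000 == (27 << 22) | 0,       i.e. m2_int = 27, m2_fraction = 0.
 */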
 93
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
 96static const int skl_rates[] = { 162000, 216000, 270000,
 97 324000, 432000, 540000 };
98static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
 101static const int default_rates[] = { 162000, 270000, 540000 };
 102
103/**
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
106 *
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
109 */
110static bool is_edp(struct intel_dp *intel_dp)
111{
da63a9f2
PZ
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
115}
116
68b4d824 117static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 118{
68b4d824
ID
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
122}
123
df0e9248
CW
124static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125{
fa90ecef 126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
127}
128
ea5b213a 129static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 130static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 131static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 132static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
133static void vlv_steal_power_sequencer(struct drm_device *dev,
134 enum pipe pipe);
a4fc5ed6 135
 136static int
 137intel_dp_max_link_bw(struct intel_dp *intel_dp)
 138{
 139 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 140
 141 switch (max_link_bw) {
 142 case DP_LINK_BW_1_62:
 143 case DP_LINK_BW_2_7:
 144 case DP_LINK_BW_5_4:
 145 break;
 146 default:
 147 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
 148 max_link_bw);
149 max_link_bw = DP_LINK_BW_1_62;
150 break;
151 }
152 return max_link_bw;
153}
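/*
 * Illustrative decoding: DP_MAX_LINK_RATE in the DPCD encodes the per-lane
 * bandwidth in units of 0.27 Gbps, so 0x06 -> 1.62 Gbps, 0x0a -> 2.7 Gbps
 * and 0x14 -> 5.4 Gbps; any other value is clamped to DP_LINK_BW_1_62 by
 * the WARN path above.
 */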
154
155static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
156{
157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158 struct drm_device *dev = intel_dig_port->base.base.dev;
159 u8 source_max, sink_max;
160
161 source_max = 4;
162 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
163 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
164 source_max = 2;
165
166 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
167
168 return min(source_max, sink_max);
169}
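/*
 * Illustrative example: on a DDI platform where port A is fused to two lanes
 * (DDI_A_4_LANES clear), source_max is 2, so even if the sink's DPCD reports
 * four lanes the link is limited to min(2, 4) = 2 lanes.
 */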
170
171/*
172 * The units on the numbers in the next two are... bizarre. Examples will
173 * make it clearer; this one parallels an example in the eDP spec.
174 *
175 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176 *
177 * 270000 * 1 * 8 / 10 == 216000
178 *
179 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
181 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182 * 119000. At 18bpp that's 2142000 kilobits per second.
183 *
184 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185 * get the result in decakilobits instead of kilobits.
186 */
187
 188static int
 189intel_dp_link_required(int pixel_clock, int bpp)
 190{
 191 return (pixel_clock * bpp + 9) / 10;
 192}
 193
194static int
195intel_dp_max_data_rate(int max_link_clock, int max_lanes)
196{
197 return (max_link_clock * max_lanes * 8) / 10;
198}
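/*
 * Worked example (illustrative), following the comment above: one lane at
 * 2.7 GHz gives intel_dp_max_data_rate(270000, 1) = 270000 * 1 * 8 / 10 =
 * 216000, while 1680x1050R (119000 kHz) at 18 bpp needs
 * intel_dp_link_required(119000, 18) = (119000 * 18 + 9) / 10 = 214200,
 * so that mode just fits on a single 2.7 GHz lane.
 */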
199
c19de8eb 200static enum drm_mode_status
a4fc5ed6
KP
201intel_dp_mode_valid(struct drm_connector *connector,
202 struct drm_display_mode *mode)
203{
df0e9248 204 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
205 struct intel_connector *intel_connector = to_intel_connector(connector);
206 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
207 int target_clock = mode->clock;
208 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 209
dd06f90e
JN
210 if (is_edp(intel_dp) && fixed_mode) {
211 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
212 return MODE_PANEL;
213
dd06f90e 214 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 215 return MODE_PANEL;
03afc4a2
DV
216
217 target_clock = fixed_mode->clock;
7de56f43
ZY
218 }
219
50fec21a 220 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 221 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
222
223 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
224 mode_rate = intel_dp_link_required(target_clock, 18);
225
226 if (mode_rate > max_rate)
c4867936 227 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
228
229 if (mode->clock < 10000)
230 return MODE_CLOCK_LOW;
231
0af78a2b
DV
232 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233 return MODE_H_ILLEGAL;
234
a4fc5ed6
KP
235 return MODE_OK;
236}
237
 238uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
239{
240 int i;
241 uint32_t v = 0;
242
243 if (src_bytes > 4)
244 src_bytes = 4;
245 for (i = 0; i < src_bytes; i++)
246 v |= ((uint32_t) src[i]) << ((3-i) * 8);
247 return v;
248}
249
 250static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
251{
252 int i;
253 if (dst_bytes > 4)
254 dst_bytes = 4;
255 for (i = 0; i < dst_bytes; i++)
256 dst[i] = src >> ((3-i) * 8);
257}
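/*
 * Illustrative example: the AUX data registers hold the message bytes
 * MSB-first, so intel_dp_pack_aux((uint8_t []){ 0x11, 0x22, 0x33 }, 3)
 * returns 0x11223300, and intel_dp_unpack_aux(0x11223300, dst, 3) restores
 * { 0x11, 0x22, 0x33 }.
 */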
258
fb0f8fbf
KP
259/* hrawclock is 1/4 the FSB frequency */
260static int
261intel_hrawclk(struct drm_device *dev)
262{
263 struct drm_i915_private *dev_priv = dev->dev_private;
264 uint32_t clkcfg;
265
9473c8f4
VP
266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
267 if (IS_VALLEYVIEW(dev))
268 return 200;
269
fb0f8fbf
KP
270 clkcfg = I915_READ(CLKCFG);
271 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_400:
273 return 100;
274 case CLKCFG_FSB_533:
275 return 133;
276 case CLKCFG_FSB_667:
277 return 166;
278 case CLKCFG_FSB_800:
279 return 200;
280 case CLKCFG_FSB_1067:
281 return 266;
282 case CLKCFG_FSB_1333:
283 return 333;
284 /* these two are just a guess; one of them might be right */
285 case CLKCFG_FSB_1600:
286 case CLKCFG_FSB_1600_ALT:
287 return 400;
288 default:
289 return 133;
290 }
291}
292
bf13e81b
JN
293static void
294intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 295 struct intel_dp *intel_dp);
bf13e81b
JN
296static void
297intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 298 struct intel_dp *intel_dp);
bf13e81b 299
773538e8
VS
300static void pps_lock(struct intel_dp *intel_dp)
301{
302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
303 struct intel_encoder *encoder = &intel_dig_port->base;
304 struct drm_device *dev = encoder->base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum intel_display_power_domain power_domain;
307
308 /*
309 * See vlv_power_sequencer_reset() why we need
310 * a power domain reference here.
311 */
312 power_domain = intel_display_port_power_domain(encoder);
313 intel_display_power_get(dev_priv, power_domain);
314
315 mutex_lock(&dev_priv->pps_mutex);
316}
317
318static void pps_unlock(struct intel_dp *intel_dp)
319{
320 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
321 struct intel_encoder *encoder = &intel_dig_port->base;
322 struct drm_device *dev = encoder->base.dev;
323 struct drm_i915_private *dev_priv = dev->dev_private;
324 enum intel_display_power_domain power_domain;
325
326 mutex_unlock(&dev_priv->pps_mutex);
327
328 power_domain = intel_display_port_power_domain(encoder);
329 intel_display_power_put(dev_priv, power_domain);
330}
331
332static void
333vlv_power_sequencer_kick(struct intel_dp *intel_dp)
334{
335 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
336 struct drm_device *dev = intel_dig_port->base.base.dev;
337 struct drm_i915_private *dev_priv = dev->dev_private;
338 enum pipe pipe = intel_dp->pps_pipe;
 339 bool pll_enabled;
340 uint32_t DP;
341
342 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
343 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
344 pipe_name(pipe), port_name(intel_dig_port->port)))
345 return;
346
347 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
348 pipe_name(pipe), port_name(intel_dig_port->port));
349
350 /* Preserve the BIOS-computed detected bit. This is
351 * supposed to be read-only.
352 */
353 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
354 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
355 DP |= DP_PORT_WIDTH(1);
356 DP |= DP_LINK_TRAIN_PAT_1;
357
358 if (IS_CHERRYVIEW(dev))
359 DP |= DP_PIPE_SELECT_CHV(pipe);
360 else if (pipe == PIPE_B)
361 DP |= DP_PIPEB_SELECT;
362
363 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
364
365 /*
366 * The DPLL for the pipe must be enabled for this to work.
 367 * So enable it temporarily if it's not already enabled.
368 */
369 if (!pll_enabled)
370 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
371 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
372
373 /*
374 * Similar magic as in intel_dp_enable_port().
375 * We _must_ do this port enable + disable trick
 376 * to make this power sequencer lock onto the port.
 377 * Otherwise even the VDD force bit won't work.
378 */
379 I915_WRITE(intel_dp->output_reg, DP);
380 POSTING_READ(intel_dp->output_reg);
381
382 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
384
385 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
386 POSTING_READ(intel_dp->output_reg);
387
388 if (!pll_enabled)
389 vlv_force_pll_off(dev, pipe);
390}
391
bf13e81b
JN
392static enum pipe
393vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
394{
395 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
396 struct drm_device *dev = intel_dig_port->base.base.dev;
397 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
398 struct intel_encoder *encoder;
399 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 400 enum pipe pipe;
bf13e81b 401
e39b999a 402 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 403
a8c3344e
VS
404 /* We should never land here with regular DP ports */
405 WARN_ON(!is_edp(intel_dp));
406
a4a5d2f8
VS
407 if (intel_dp->pps_pipe != INVALID_PIPE)
408 return intel_dp->pps_pipe;
409
410 /*
 411 * We don't have a power sequencer currently.
412 * Pick one that's not used by other ports.
413 */
414 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
415 base.head) {
416 struct intel_dp *tmp;
417
418 if (encoder->type != INTEL_OUTPUT_EDP)
419 continue;
420
421 tmp = enc_to_intel_dp(&encoder->base);
422
423 if (tmp->pps_pipe != INVALID_PIPE)
424 pipes &= ~(1 << tmp->pps_pipe);
425 }
426
427 /*
428 * Didn't find one. This should not happen since there
429 * are two power sequencers and up to two eDP ports.
430 */
431 if (WARN_ON(pipes == 0))
a8c3344e
VS
432 pipe = PIPE_A;
433 else
434 pipe = ffs(pipes) - 1;
a4a5d2f8 435
a8c3344e
VS
436 vlv_steal_power_sequencer(dev, pipe);
437 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
438
439 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
440 pipe_name(intel_dp->pps_pipe),
441 port_name(intel_dig_port->port));
442
443 /* init power sequencer on this pipe and port */
36b5f425
VS
444 intel_dp_init_panel_power_sequencer(dev, intel_dp);
445 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 446
961a0db0
VS
447 /*
448 * Even vdd force doesn't work until we've made
449 * the power sequencer lock in on the port.
450 */
451 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
452
453 return intel_dp->pps_pipe;
454}
455
6491ab27
VS
456typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
457 enum pipe pipe);
458
459static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
460 enum pipe pipe)
461{
462 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
463}
464
465static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
466 enum pipe pipe)
467{
468 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
469}
470
471static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
472 enum pipe pipe)
473{
474 return true;
475}
bf13e81b 476
a4a5d2f8 477static enum pipe
6491ab27
VS
478vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
479 enum port port,
480 vlv_pipe_check pipe_check)
a4a5d2f8
VS
481{
482 enum pipe pipe;
bf13e81b 483
bf13e81b
JN
484 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
485 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
486 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
487
488 if (port_sel != PANEL_PORT_SELECT_VLV(port))
489 continue;
490
6491ab27
VS
491 if (!pipe_check(dev_priv, pipe))
492 continue;
493
a4a5d2f8 494 return pipe;
bf13e81b
JN
495 }
496
a4a5d2f8
VS
497 return INVALID_PIPE;
498}
499
500static void
501vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
502{
503 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
504 struct drm_device *dev = intel_dig_port->base.base.dev;
505 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
506 enum port port = intel_dig_port->port;
507
508 lockdep_assert_held(&dev_priv->pps_mutex);
509
510 /* try to find a pipe with this port selected */
6491ab27
VS
511 /* first pick one where the panel is on */
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_has_pp_on);
514 /* didn't find one? pick one where vdd is on */
515 if (intel_dp->pps_pipe == INVALID_PIPE)
516 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
517 vlv_pipe_has_vdd_on);
518 /* didn't find one? pick one with just the correct port */
519 if (intel_dp->pps_pipe == INVALID_PIPE)
520 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
521 vlv_pipe_any);
a4a5d2f8
VS
522
523 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
524 if (intel_dp->pps_pipe == INVALID_PIPE) {
525 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
526 port_name(port));
527 return;
bf13e81b
JN
528 }
529
a4a5d2f8
VS
530 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
531 port_name(port), pipe_name(intel_dp->pps_pipe));
532
36b5f425
VS
533 intel_dp_init_panel_power_sequencer(dev, intel_dp);
534 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
535}
536
773538e8
VS
537void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
538{
539 struct drm_device *dev = dev_priv->dev;
540 struct intel_encoder *encoder;
541
542 if (WARN_ON(!IS_VALLEYVIEW(dev)))
543 return;
544
545 /*
546 * We can't grab pps_mutex here due to deadlock with power_domain
547 * mutex when power_domain functions are called while holding pps_mutex.
548 * That also means that in order to use pps_pipe the code needs to
549 * hold both a power domain reference and pps_mutex, and the power domain
550 * reference get/put must be done while _not_ holding pps_mutex.
551 * pps_{lock,unlock}() do these steps in the correct order, so one
552 * should use them always.
553 */
554
555 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
556 struct intel_dp *intel_dp;
557
558 if (encoder->type != INTEL_OUTPUT_EDP)
559 continue;
560
561 intel_dp = enc_to_intel_dp(&encoder->base);
562 intel_dp->pps_pipe = INVALID_PIPE;
563 }
bf13e81b
JN
564}
565
566static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
567{
568 struct drm_device *dev = intel_dp_to_dev(intel_dp);
569
570 if (HAS_PCH_SPLIT(dev))
571 return PCH_PP_CONTROL;
572 else
573 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
574}
575
576static u32 _pp_stat_reg(struct intel_dp *intel_dp)
577{
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
579
580 if (HAS_PCH_SPLIT(dev))
581 return PCH_PP_STATUS;
582 else
583 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
584}
585
 586/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 587   This function is only applicable when panel PM state is not to be tracked. */
588static int edp_notify_handler(struct notifier_block *this, unsigned long code,
589 void *unused)
590{
591 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
592 edp_notifier);
593 struct drm_device *dev = intel_dp_to_dev(intel_dp);
594 struct drm_i915_private *dev_priv = dev->dev_private;
595 u32 pp_div;
596 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
597
598 if (!is_edp(intel_dp) || code != SYS_RESTART)
599 return 0;
600
773538e8 601 pps_lock(intel_dp);
e39b999a 602
01527b31 603 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
604 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
605
01527b31
CT
606 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
607 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
608 pp_div = I915_READ(pp_div_reg);
609 pp_div &= PP_REFERENCE_DIVIDER_MASK;
610
611 /* 0x1F write to PP_DIV_REG sets max cycle delay */
612 I915_WRITE(pp_div_reg, pp_div | 0x1F);
613 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
614 msleep(intel_dp->panel_power_cycle_delay);
615 }
616
773538e8 617 pps_unlock(intel_dp);
e39b999a 618
01527b31
CT
619 return 0;
620}
621
4be73780 622static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 623{
30add22d 624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
625 struct drm_i915_private *dev_priv = dev->dev_private;
626
e39b999a
VS
627 lockdep_assert_held(&dev_priv->pps_mutex);
628
9a42356b
VS
629 if (IS_VALLEYVIEW(dev) &&
630 intel_dp->pps_pipe == INVALID_PIPE)
631 return false;
632
bf13e81b 633 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
634}
635
4be73780 636static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 637{
30add22d 638 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
639 struct drm_i915_private *dev_priv = dev->dev_private;
640
e39b999a
VS
641 lockdep_assert_held(&dev_priv->pps_mutex);
642
9a42356b
VS
643 if (IS_VALLEYVIEW(dev) &&
644 intel_dp->pps_pipe == INVALID_PIPE)
645 return false;
646
773538e8 647 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
648}
649
9b984dae
KP
650static void
651intel_dp_check_edp(struct intel_dp *intel_dp)
652{
30add22d 653 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 654 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 655
9b984dae
KP
656 if (!is_edp(intel_dp))
657 return;
453c5420 658
4be73780 659 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
660 WARN(1, "eDP powered off while attempting aux channel communication.\n");
661 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
662 I915_READ(_pp_stat_reg(intel_dp)),
663 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
664 }
665}
666
9ee32fea
DV
667static uint32_t
668intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
669{
670 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
671 struct drm_device *dev = intel_dig_port->base.base.dev;
672 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 673 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
674 uint32_t status;
675 bool done;
676
ef04f00d 677#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 678 if (has_aux_irq)
b18ac466 679 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 680 msecs_to_jiffies_timeout(10));
9ee32fea
DV
681 else
682 done = wait_for_atomic(C, 10) == 0;
683 if (!done)
684 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
685 has_aux_irq);
686#undef C
687
688 return status;
689}
690
 691static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 692{
 693 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 694 struct drm_device *dev = intel_dig_port->base.base.dev;
 695
 696 /*
 697 * The clock divider is based on hrawclk and should produce a roughly
 698 * 2 MHz AUX clock, so take the hrawclk value (in MHz) and divide by 2.
 699 */
 700 return index ? 0 : intel_hrawclk(dev) / 2;
701}
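/*
 * Illustrative example: with an 800 MHz FSB, intel_hrawclk() above returns
 * 200 (MHz), so the divider is 200 / 2 = 100 and the AUX channel is clocked
 * at roughly 200 MHz / 100 = 2 MHz, as intended.
 */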
702
703static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704{
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 707 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
708
709 if (index)
710 return 0;
711
712 if (intel_dig_port->port == PORT_A) {
469d4b2a 713 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
714 } else {
715 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
716 }
717}
718
719static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
720{
721 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
722 struct drm_device *dev = intel_dig_port->base.base.dev;
723 struct drm_i915_private *dev_priv = dev->dev_private;
724
725 if (intel_dig_port->port == PORT_A) {
726 if (index)
727 return 0;
1652d19e 728 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
729 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
730 /* Workaround for non-ULT HSW */
bc86625a
CW
731 switch (index) {
732 case 0: return 63;
733 case 1: return 72;
734 default: return 0;
735 }
ec5b01dd 736 } else {
bc86625a 737 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 738 }
b84a1cf8
RV
739}
740
ec5b01dd
DL
741static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
742{
743 return index ? 0 : 100;
744}
745
b6b5e383
DL
746static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
747{
748 /*
749 * SKL doesn't need us to program the AUX clock divider (Hardware will
750 * derive the clock from CDCLK automatically). We still implement the
751 * get_aux_clock_divider vfunc to plug-in into the existing code.
752 */
753 return index ? 0 : 1;
754}
755
5ed12a19
DL
756static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
757 bool has_aux_irq,
758 int send_bytes,
759 uint32_t aux_clock_divider)
760{
761 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
762 struct drm_device *dev = intel_dig_port->base.base.dev;
763 uint32_t precharge, timeout;
764
765 if (IS_GEN6(dev))
766 precharge = 3;
767 else
768 precharge = 5;
769
770 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
771 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
772 else
773 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
774
775 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 776 DP_AUX_CH_CTL_DONE |
5ed12a19 777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 779 timeout |
788d4433 780 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 783 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
784}
785
b9ca5fad
DL
786static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
787 bool has_aux_irq,
788 int send_bytes,
789 uint32_t unused)
790{
791 return DP_AUX_CH_CTL_SEND_BUSY |
792 DP_AUX_CH_CTL_DONE |
793 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
794 DP_AUX_CH_CTL_TIME_OUT_ERROR |
795 DP_AUX_CH_CTL_TIME_OUT_1600us |
796 DP_AUX_CH_CTL_RECEIVE_ERROR |
797 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
798 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
799}
800
b84a1cf8
RV
801static int
802intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 803 const uint8_t *send, int send_bytes,
b84a1cf8
RV
804 uint8_t *recv, int recv_size)
805{
806 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
807 struct drm_device *dev = intel_dig_port->base.base.dev;
808 struct drm_i915_private *dev_priv = dev->dev_private;
809 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
810 uint32_t ch_data = ch_ctl + 4;
bc86625a 811 uint32_t aux_clock_divider;
b84a1cf8
RV
812 int i, ret, recv_bytes;
813 uint32_t status;
5ed12a19 814 int try, clock = 0;
4e6b788c 815 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
816 bool vdd;
817
773538e8 818 pps_lock(intel_dp);
e39b999a 819
72c3500a
VS
820 /*
821 * We will be called with VDD already enabled for dpcd/edid/oui reads.
822 * In such cases we want to leave VDD enabled and it's up to upper layers
823 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
824 * ourselves.
825 */
1e0560e0 826 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
827
828 /* dp aux is extremely sensitive to irq latency, hence request the
829 * lowest possible wakeup latency and so prevent the cpu from going into
830 * deep sleep states.
831 */
832 pm_qos_update_request(&dev_priv->pm_qos, 0);
833
834 intel_dp_check_edp(intel_dp);
5eb08b69 835
c67a470b
PZ
836 intel_aux_display_runtime_get(dev_priv);
837
11bee43e
JB
838 /* Try to wait for any previous AUX channel activity */
839 for (try = 0; try < 3; try++) {
ef04f00d 840 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
841 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
842 break;
843 msleep(1);
844 }
845
846 if (try == 3) {
847 WARN(1, "dp_aux_ch not started status 0x%08x\n",
848 I915_READ(ch_ctl));
9ee32fea
DV
849 ret = -EBUSY;
850 goto out;
4f7f7b7e
CW
851 }
852
46a5ae9f
PZ
853 /* Only 5 data registers! */
854 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
855 ret = -E2BIG;
856 goto out;
857 }
858
ec5b01dd 859 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
860 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
861 has_aux_irq,
862 send_bytes,
863 aux_clock_divider);
5ed12a19 864
bc86625a
CW
865 /* Must try at least 3 times according to DP spec */
866 for (try = 0; try < 5; try++) {
867 /* Load the send data into the aux channel data registers */
868 for (i = 0; i < send_bytes; i += 4)
869 I915_WRITE(ch_data + i,
a4f1289e
RV
870 intel_dp_pack_aux(send + i,
871 send_bytes - i));
bc86625a
CW
872
873 /* Send the command and wait for it to complete */
5ed12a19 874 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
875
876 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
877
878 /* Clear done status and any errors */
879 I915_WRITE(ch_ctl,
880 status |
881 DP_AUX_CH_CTL_DONE |
882 DP_AUX_CH_CTL_TIME_OUT_ERROR |
883 DP_AUX_CH_CTL_RECEIVE_ERROR);
884
74ebf294 885 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 886 continue;
74ebf294
TP
887
888 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
889 * 400us delay required for errors and timeouts
890 * Timeout errors from the HW already meet this
891 * requirement so skip to next iteration
892 */
893 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
894 usleep_range(400, 500);
bc86625a 895 continue;
74ebf294 896 }
bc86625a
CW
897 if (status & DP_AUX_CH_CTL_DONE)
898 break;
899 }
4f7f7b7e 900 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
901 break;
902 }
903
a4fc5ed6 904 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 905 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
906 ret = -EBUSY;
907 goto out;
a4fc5ed6
KP
908 }
909
910 /* Check for timeout or receive error.
911 * Timeouts occur when the sink is not connected
912 */
a5b3da54 913 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 914 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
915 ret = -EIO;
916 goto out;
a5b3da54 917 }
1ae8c0a5
KP
918
919 /* Timeouts occur when the device isn't connected, so they're
920 * "normal" -- don't fill the kernel log with these */
a5b3da54 921 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 922 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
923 ret = -ETIMEDOUT;
924 goto out;
a4fc5ed6
KP
925 }
926
927 /* Unload any bytes sent back from the other side */
928 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
929 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
930 if (recv_bytes > recv_size)
931 recv_bytes = recv_size;
0206e353 932
4f7f7b7e 933 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
934 intel_dp_unpack_aux(I915_READ(ch_data + i),
935 recv + i, recv_bytes - i);
a4fc5ed6 936
9ee32fea
DV
937 ret = recv_bytes;
938out:
939 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 940 intel_aux_display_runtime_put(dev_priv);
9ee32fea 941
884f19e9
JN
942 if (vdd)
943 edp_panel_vdd_off(intel_dp, false);
944
773538e8 945 pps_unlock(intel_dp);
e39b999a 946
9ee32fea 947 return ret;
a4fc5ed6
KP
948}
949
a6c8aff0
JN
950#define BARE_ADDRESS_SIZE 3
951#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
952static ssize_t
953intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 954{
9d1a1031
JN
955 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
956 uint8_t txbuf[20], rxbuf[20];
957 size_t txsize, rxsize;
a4fc5ed6 958 int ret;
a4fc5ed6 959
d2d9cbbd
VS
960 txbuf[0] = (msg->request << 4) |
961 ((msg->address >> 16) & 0xf);
962 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
963 txbuf[2] = msg->address & 0xff;
964 txbuf[3] = msg->size - 1;
46a5ae9f 965
9d1a1031
JN
966 switch (msg->request & ~DP_AUX_I2C_MOT) {
967 case DP_AUX_NATIVE_WRITE:
968 case DP_AUX_I2C_WRITE:
a6c8aff0 969 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 970 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 971
9d1a1031
JN
972 if (WARN_ON(txsize > 20))
973 return -E2BIG;
a4fc5ed6 974
9d1a1031 975 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 976
9d1a1031
JN
977 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
978 if (ret > 0) {
979 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 980
a1ddefd8
JN
981 if (ret > 1) {
982 /* Number of bytes written in a short write. */
983 ret = clamp_t(int, rxbuf[1], 0, msg->size);
984 } else {
985 /* Return payload size. */
986 ret = msg->size;
987 }
9d1a1031
JN
988 }
989 break;
46a5ae9f 990
9d1a1031
JN
991 case DP_AUX_NATIVE_READ:
992 case DP_AUX_I2C_READ:
a6c8aff0 993 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 994 rxsize = msg->size + 1;
a4fc5ed6 995
9d1a1031
JN
996 if (WARN_ON(rxsize > 20))
997 return -E2BIG;
a4fc5ed6 998
9d1a1031
JN
999 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1000 if (ret > 0) {
1001 msg->reply = rxbuf[0] >> 4;
1002 /*
1003 * Assume happy day, and copy the data. The caller is
1004 * expected to check msg->reply before touching it.
1005 *
1006 * Return payload size.
1007 */
1008 ret--;
1009 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1010 }
9d1a1031
JN
1011 break;
1012
1013 default:
1014 ret = -EINVAL;
1015 break;
a4fc5ed6 1016 }
f51a44b9 1017
9d1a1031 1018 return ret;
1019}
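/*
 * Illustrative example: a one-byte native AUX read of DPCD address 0x000
 * (DP_DPCD_REV) is encoded as txbuf = { 0x90, 0x00, 0x00, 0x00 }, i.e. the
 * DP_AUX_NATIVE_READ request (0x9) in the high nibble of byte 0, the 20-bit
 * address spread over bytes 0-2 and msg->size - 1 = 0 in byte 3. For reads
 * txsize is HEADER_SIZE (4) and rxsize is msg->size + 1, because the first
 * byte received carries the AUX reply code (msg->reply = rxbuf[0] >> 4).
 */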
1020
9d1a1031
JN
1021static void
1022intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1023{
1024 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1025 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1026 enum port port = intel_dig_port->port;
0b99836f 1027 const char *name = NULL;
ab2c0672
DA
1028 int ret;
1029
33ad6626
JN
1030 switch (port) {
1031 case PORT_A:
1032 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1033 name = "DPDDC-A";
ab2c0672 1034 break;
33ad6626
JN
1035 case PORT_B:
1036 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1037 name = "DPDDC-B";
ab2c0672 1038 break;
33ad6626
JN
1039 case PORT_C:
1040 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1041 name = "DPDDC-C";
ab2c0672 1042 break;
33ad6626
JN
1043 case PORT_D:
1044 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1045 name = "DPDDC-D";
33ad6626
JN
1046 break;
1047 default:
1048 BUG();
ab2c0672
DA
1049 }
1050
1b1aad75
DL
1051 /*
1052 * The AUX_CTL register is usually DP_CTL + 0x10.
1053 *
1054 * On Haswell and Broadwell though:
1055 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1056 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1057 *
1058 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1059 */
1060 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1061 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1062
0b99836f 1063 intel_dp->aux.name = name;
9d1a1031
JN
1064 intel_dp->aux.dev = dev->dev;
1065 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1066
0b99836f
JN
1067 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1068 connector->base.kdev->kobj.name);
8316f337 1069
4f71d0cb 1070 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1071 if (ret < 0) {
4f71d0cb 1072 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1073 name, ret);
1074 return;
ab2c0672 1075 }
8a5e6aeb 1076
0b99836f
JN
1077 ret = sysfs_create_link(&connector->base.kdev->kobj,
1078 &intel_dp->aux.ddc.dev.kobj,
1079 intel_dp->aux.ddc.dev.kobj.name);
1080 if (ret < 0) {
1081 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1082 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1083 }
a4fc5ed6
KP
1084}
1085
80f65de3
ID
1086static void
1087intel_dp_connector_unregister(struct intel_connector *intel_connector)
1088{
1089 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1090
0e32b39c
DA
1091 if (!intel_connector->mst_port)
1092 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1093 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1094 intel_connector_unregister(intel_connector);
1095}
1096
5416d871 1097static void
c3346ef6 1098skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1099{
1100 u32 ctrl1;
1101
dd3cd74a
ACO
1102 memset(&pipe_config->dpll_hw_state, 0,
1103 sizeof(pipe_config->dpll_hw_state));
1104
5416d871
DL
1105 pipe_config->ddi_pll_sel = SKL_DPLL0;
1106 pipe_config->dpll_hw_state.cfgcr1 = 0;
1107 pipe_config->dpll_hw_state.cfgcr2 = 0;
1108
1109 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1110 switch (link_clock / 2) {
1111 case 81000:
71cd8423 1112 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1113 SKL_DPLL0);
1114 break;
c3346ef6 1115 case 135000:
71cd8423 1116 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1117 SKL_DPLL0);
1118 break;
c3346ef6 1119 case 270000:
71cd8423 1120 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1121 SKL_DPLL0);
1122 break;
c3346ef6 1123 case 162000:
71cd8423 1124 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1125 SKL_DPLL0);
1126 break;
 1127 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
 1128 results in a CDCLK change. Need to handle the CDCLK change by
 1129 disabling pipes and re-enabling them. */
1130 case 108000:
71cd8423 1131 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1132 SKL_DPLL0);
1133 break;
1134 case 216000:
71cd8423 1135 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1136 SKL_DPLL0);
1137 break;
1138
1139 }
1140 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1141}
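/*
 * Illustrative example: an eDP link at 270000 (HBR, 2.7 Gbps per lane) takes
 * the link_clock / 2 = 135000 case and programs DPLL_CTRL1_LINK_RATE_1350
 * for SKL_DPLL0, while a 540000 (HBR2) link hits the 270000 case and
 * programs DPLL_CTRL1_LINK_RATE_2700.
 */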
1142
0e50338c 1143static void
5cec258b 1144hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1145{
1146 switch (link_bw) {
1147 case DP_LINK_BW_1_62:
1148 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1149 break;
1150 case DP_LINK_BW_2_7:
1151 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1152 break;
1153 case DP_LINK_BW_5_4:
1154 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1155 break;
1156 }
1157}
1158
fc0f8e25 1159static int
12f6a2e2 1160intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1161{
94ca719e
VS
1162 if (intel_dp->num_sink_rates) {
1163 *sink_rates = intel_dp->sink_rates;
1164 return intel_dp->num_sink_rates;
fc0f8e25 1165 }
12f6a2e2
VS
1166
1167 *sink_rates = default_rates;
1168
1169 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1170}
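/*
 * Illustrative example: when the sink exposes no DPCD rate table
 * (num_sink_rates == 0), the returned count indexes into default_rates[]
 * via the link-bw code: DP_LINK_BW_2_7 (0x0a) gives (0x0a >> 3) + 1 = 2,
 * i.e. { 162000, 270000 }, and DP_LINK_BW_5_4 (0x14) gives 3, i.e. all of
 * { 162000, 270000, 540000 }.
 */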
1171
a8f3ef61 1172static int
1db10e28 1173intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1174{
64987fc5
SJ
1175 if (IS_BROXTON(dev)) {
1176 *source_rates = bxt_rates;
1177 return ARRAY_SIZE(bxt_rates);
1178 } else if (IS_SKYLAKE(dev)) {
637a9c63
SJ
1179 *source_rates = skl_rates;
1180 return ARRAY_SIZE(skl_rates);
fe51bfb9
VS
1181 } else if (IS_CHERRYVIEW(dev)) {
1182 *source_rates = chv_rates;
1183 return ARRAY_SIZE(chv_rates);
a8f3ef61 1184 }
636280ba
VS
1185
1186 *source_rates = default_rates;
1187
1db10e28
VS
1188 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1189 /* WaDisableHBR2:skl */
1190 return (DP_LINK_BW_2_7 >> 3) + 1;
1191 else if (INTEL_INFO(dev)->gen >= 8 ||
1192 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1193 return (DP_LINK_BW_5_4 >> 3) + 1;
1194 else
1195 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1196}
1197
c6bb3538
DV
1198static void
1199intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1200 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1201{
1202 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1203 const struct dp_link_dpll *divisor = NULL;
1204 int i, count = 0;
c6bb3538
DV
1205
1206 if (IS_G4X(dev)) {
9dd4ffdf
CML
1207 divisor = gen4_dpll;
1208 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1209 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1210 divisor = pch_dpll;
1211 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1212 } else if (IS_CHERRYVIEW(dev)) {
1213 divisor = chv_dpll;
1214 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1215 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1216 divisor = vlv_dpll;
1217 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1218 }
9dd4ffdf
CML
1219
1220 if (divisor && count) {
1221 for (i = 0; i < count; i++) {
1222 if (link_bw == divisor[i].link_bw) {
1223 pipe_config->dpll = divisor[i].dpll;
1224 pipe_config->clock_set = true;
1225 break;
1226 }
1227 }
c6bb3538
DV
1228 }
1229}
1230
2ecae76a
VS
1231static int intersect_rates(const int *source_rates, int source_len,
1232 const int *sink_rates, int sink_len,
94ca719e 1233 int *common_rates)
a8f3ef61
SJ
1234{
1235 int i = 0, j = 0, k = 0;
1236
a8f3ef61
SJ
1237 while (i < source_len && j < sink_len) {
1238 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1239 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1240 return k;
94ca719e 1241 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1242 ++k;
1243 ++i;
1244 ++j;
1245 } else if (source_rates[i] < sink_rates[j]) {
1246 ++i;
1247 } else {
1248 ++j;
1249 }
1250 }
1251 return k;
1252}
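/*
 * Illustrative example: both arrays are assumed to be sorted ascending, so
 * this is a linear merge. With source = { 162000, 270000, 540000 } and
 * sink = { 162000, 216000, 270000 }, common_rates becomes { 162000, 270000 }
 * and the function returns 2.
 */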
1253
94ca719e
VS
1254static int intel_dp_common_rates(struct intel_dp *intel_dp,
1255 int *common_rates)
2ecae76a
VS
1256{
1257 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1258 const int *source_rates, *sink_rates;
1259 int source_len, sink_len;
1260
1261 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1262 source_len = intel_dp_source_rates(dev, &source_rates);
1263
1264 return intersect_rates(source_rates, source_len,
1265 sink_rates, sink_len,
94ca719e 1266 common_rates);
2ecae76a
VS
1267}
1268
0336400e
VS
1269static void snprintf_int_array(char *str, size_t len,
1270 const int *array, int nelem)
1271{
1272 int i;
1273
1274 str[0] = '\0';
1275
1276 for (i = 0; i < nelem; i++) {
b2f505be 1277 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1278 if (r >= len)
1279 return;
1280 str += r;
1281 len -= r;
1282 }
1283}
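/*
 * Illustrative example: snprintf_int_array(str, sizeof(str), default_rates, 3)
 * yields "162000, 270000, 540000", which is the format printed by the
 * "source rates:" / "sink rates:" / "common rates:" debug messages below.
 */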
1284
1285static void intel_dp_print_rates(struct intel_dp *intel_dp)
1286{
1287 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1288 const int *source_rates, *sink_rates;
94ca719e
VS
1289 int source_len, sink_len, common_len;
1290 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1291 char str[128]; /* FIXME: too big for stack? */
1292
1293 if ((drm_debug & DRM_UT_KMS) == 0)
1294 return;
1295
1296 source_len = intel_dp_source_rates(dev, &source_rates);
1297 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1298 DRM_DEBUG_KMS("source rates: %s\n", str);
1299
1300 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1301 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1302 DRM_DEBUG_KMS("sink rates: %s\n", str);
1303
94ca719e
VS
1304 common_len = intel_dp_common_rates(intel_dp, common_rates);
1305 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1306 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1307}
1308
f4896f15 1309static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1310{
1311 int i = 0;
1312
1313 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1314 if (find == rates[i])
1315 break;
1316
1317 return i;
1318}
1319
50fec21a
VS
1320int
1321intel_dp_max_link_rate(struct intel_dp *intel_dp)
1322{
1323 int rates[DP_MAX_SUPPORTED_RATES] = {};
1324 int len;
1325
94ca719e 1326 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1327 if (WARN_ON(len <= 0))
1328 return 162000;
1329
1330 return rates[rate_to_index(0, rates) - 1];
1331}
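/*
 * Illustrative example: rates[] is zero-initialized, so rate_to_index(0, rates)
 * returns the index of the first unused slot, i.e. the number of common rates,
 * and rates[... - 1] is the highest one. With common rates
 * { 162000, 270000, 540000 } this returns 540000.
 */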
1332
ed4e9c1d
VS
1333int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1334{
94ca719e 1335 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1336}
1337
00c09d70 1338bool
5bfe2ac0 1339intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1340 struct intel_crtc_state *pipe_config)
a4fc5ed6 1341{
5bfe2ac0 1342 struct drm_device *dev = encoder->base.dev;
36008365 1343 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1344 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1345 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1346 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1347 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1348 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1349 int lane_count, clock;
56071a20 1350 int min_lane_count = 1;
eeb6324d 1351 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1352 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1353 int min_clock = 0;
a8f3ef61 1354 int max_clock;
083f9560 1355 int bpp, mode_rate;
ff9a6750 1356 int link_avail, link_clock;
94ca719e
VS
1357 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1358 int common_len;
a8f3ef61 1359
94ca719e 1360 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1361
1362 /* No common link rates between source and sink */
94ca719e 1363 WARN_ON(common_len <= 0);
a8f3ef61 1364
94ca719e 1365 max_clock = common_len - 1;
a4fc5ed6 1366
bc7d38a4 1367 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1368 pipe_config->has_pch_encoder = true;
1369
03afc4a2 1370 pipe_config->has_dp_encoder = true;
f769cd24 1371 pipe_config->has_drrs = false;
9fcb1704 1372 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1373
dd06f90e
JN
1374 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1375 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1376 adjusted_mode);
a1b2278e
CK
1377
1378 if (INTEL_INFO(dev)->gen >= 9) {
1379 int ret;
1380 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1381 if (ret)
1382 return ret;
1383 }
1384
2dd24552
JB
1385 if (!HAS_PCH_SPLIT(dev))
1386 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1387 intel_connector->panel.fitting_mode);
1388 else
b074cec8
JB
1389 intel_pch_panel_fitting(intel_crtc, pipe_config,
1390 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1391 }
1392
cb1793ce 1393 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1394 return false;
1395
083f9560 1396 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1397 "max bw %d pixel clock %iKHz\n",
94ca719e 1398 max_lane_count, common_rates[max_clock],
241bfc38 1399 adjusted_mode->crtc_clock);
083f9560 1400
36008365
DV
1401 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1402 * bpc in between. */
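/*
 * Illustrative walk: starting from e.g. pipe_bpp = 30, the loop below tries
 * bpp = 30, 24, 18 (steps of 2 bits per component * 3 components) and, for
 * each bpp, scans link rates and lane counts until the mode fits, e.g.
 * 1920x1080@60 (148500 kHz) at 24 bpp needs (148500 * 24 + 9) / 10 = 356400,
 * which fits four lanes at 270000 (270000 * 4 * 8 / 10 = 864000).
 */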
3e7ca985 1403 bpp = pipe_config->pipe_bpp;
56071a20
JN
1404 if (is_edp(intel_dp)) {
1405 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1406 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1407 dev_priv->vbt.edp_bpp);
1408 bpp = dev_priv->vbt.edp_bpp;
1409 }
1410
344c5bbc
JN
1411 /*
1412 * Use the maximum clock and number of lanes the eDP panel
 1413 * advertises being capable of. The panels are generally
1414 * designed to support only a single clock and lane
1415 * configuration, and typically these values correspond to the
1416 * native resolution of the panel.
1417 */
1418 min_lane_count = max_lane_count;
1419 min_clock = max_clock;
7984211e 1420 }
657445fe 1421
36008365 1422 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1423 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1424 bpp);
36008365 1425
c6930992 1426 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1427 for (lane_count = min_lane_count;
1428 lane_count <= max_lane_count;
1429 lane_count <<= 1) {
1430
94ca719e 1431 link_clock = common_rates[clock];
36008365
DV
1432 link_avail = intel_dp_max_data_rate(link_clock,
1433 lane_count);
1434
1435 if (mode_rate <= link_avail) {
1436 goto found;
1437 }
1438 }
1439 }
1440 }
c4867936 1441
36008365 1442 return false;
3685a8f3 1443
36008365 1444found:
55bc60db
VS
1445 if (intel_dp->color_range_auto) {
1446 /*
1447 * See:
1448 * CEA-861-E - 5.1 Default Encoding Parameters
1449 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1450 */
18316c8c 1451 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1452 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1453 else
1454 intel_dp->color_range = 0;
1455 }
1456
3685a8f3 1457 if (intel_dp->color_range)
50f3b016 1458 pipe_config->limited_color_range = true;
a4fc5ed6 1459
36008365 1460 intel_dp->lane_count = lane_count;
a8f3ef61 1461
94ca719e 1462 if (intel_dp->num_sink_rates) {
bc27b7d3 1463 intel_dp->link_bw = 0;
a8f3ef61 1464 intel_dp->rate_select =
94ca719e 1465 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1466 } else {
1467 intel_dp->link_bw =
94ca719e 1468 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1469 intel_dp->rate_select = 0;
a8f3ef61
SJ
1470 }
1471
657445fe 1472 pipe_config->pipe_bpp = bpp;
94ca719e 1473 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1474
36008365
DV
1475 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1476 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1477 pipe_config->port_clock, bpp);
36008365
DV
1478 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1479 mode_rate, link_avail);
a4fc5ed6 1480
03afc4a2 1481 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1482 adjusted_mode->crtc_clock,
1483 pipe_config->port_clock,
03afc4a2 1484 &pipe_config->dp_m_n);
9d1a455b 1485
439d7ac0 1486 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1487 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1488 pipe_config->has_drrs = true;
439d7ac0
PB
1489 intel_link_compute_m_n(bpp, lane_count,
1490 intel_connector->panel.downclock_mode->clock,
1491 pipe_config->port_clock,
1492 &pipe_config->dp_m2_n2);
1493 }
1494
5416d871 1495 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1496 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1497 else if (IS_BROXTON(dev))
1498 /* handled in ddi */;
5416d871 1499 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1500 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1501 else
1502 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1503
03afc4a2 1504 return true;
a4fc5ed6
KP
1505}
1506
7c62a164 1507static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1508{
7c62a164
DV
1509 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1510 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1511 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 u32 dpa_ctl;
1514
6e3c9717
ACO
1515 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1516 crtc->config->port_clock);
ea9b6006
DV
1517 dpa_ctl = I915_READ(DP_A);
1518 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1519
6e3c9717 1520 if (crtc->config->port_clock == 162000) {
1ce17038
DV
 1521 /* For a long time we've carried around an ILK-DevA w/a for the
1522 * 160MHz clock. If we're really unlucky, it's still required.
1523 */
1524 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1525 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1526 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1527 } else {
1528 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1529 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1530 }
1ce17038 1531
ea9b6006
DV
1532 I915_WRITE(DP_A, dpa_ctl);
1533
1534 POSTING_READ(DP_A);
1535 udelay(500);
1536}
1537
8ac33ed3 1538static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1539{
b934223d 1540 struct drm_device *dev = encoder->base.dev;
417e822d 1541 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1542 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1543 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1544 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1545 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1546
417e822d 1547 /*
1a2eb460 1548 * There are four kinds of DP registers:
417e822d
KP
1549 *
1550 * IBX PCH
1a2eb460
KP
1551 * SNB CPU
1552 * IVB CPU
417e822d
KP
1553 * CPT PCH
1554 *
1555 * IBX PCH and CPU are the same for almost everything,
1556 * except that the CPU DP PLL is configured in this
1557 * register
1558 *
1559 * CPT PCH is quite different, having many bits moved
1560 * to the TRANS_DP_CTL register instead. That
1561 * configuration happens (oddly) in ironlake_pch_enable
1562 */
9c9e7927 1563
417e822d
KP
1564 /* Preserve the BIOS-computed detected bit. This is
1565 * supposed to be read-only.
1566 */
1567 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1568
417e822d 1569 /* Handle DP bits in common between all three register formats */
417e822d 1570 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1571 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1572
6e3c9717 1573 if (crtc->config->has_audio)
ea5b213a 1574 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1575
417e822d 1576 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1577
39e5fa88 1578 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1579 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1580 intel_dp->DP |= DP_SYNC_HS_HIGH;
1581 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1582 intel_dp->DP |= DP_SYNC_VS_HIGH;
1583 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1584
6aba5b6c 1585 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1586 intel_dp->DP |= DP_ENHANCED_FRAMING;
1587
7c62a164 1588 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1589 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1590 u32 trans_dp;
1591
39e5fa88 1592 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1593
1594 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1595 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1596 trans_dp |= TRANS_DP_ENH_FRAMING;
1597 else
1598 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1599 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1600 } else {
b2634017 1601 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1602 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1603
1604 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1605 intel_dp->DP |= DP_SYNC_HS_HIGH;
1606 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1607 intel_dp->DP |= DP_SYNC_VS_HIGH;
1608 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1609
6aba5b6c 1610 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1611 intel_dp->DP |= DP_ENHANCED_FRAMING;
1612
39e5fa88 1613 if (IS_CHERRYVIEW(dev))
44f37d1f 1614 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1615 else if (crtc->pipe == PIPE_B)
1616 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1617 }
a4fc5ed6
KP
1618}
1619
ffd6749d
PZ
1620#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1621#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1622
1a5ef5b7
PZ
1623#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1624#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1625
ffd6749d
PZ
1626#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1627#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1628
4be73780 1629static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1630 u32 mask,
1631 u32 value)
bd943159 1632{
30add22d 1633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1634 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1635 u32 pp_stat_reg, pp_ctrl_reg;
1636
e39b999a
VS
1637 lockdep_assert_held(&dev_priv->pps_mutex);
1638
bf13e81b
JN
1639 pp_stat_reg = _pp_stat_reg(intel_dp);
1640 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1641
99ea7127 1642 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1643 mask, value,
1644 I915_READ(pp_stat_reg),
1645 I915_READ(pp_ctrl_reg));
32ce697c 1646
453c5420 1647 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1648 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1649 I915_READ(pp_stat_reg),
1650 I915_READ(pp_ctrl_reg));
32ce697c 1651 }
54c136d4
CW
1652
1653 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1654}
32ce697c 1655
4be73780 1656static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1657{
1658 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1659 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1660}
1661
4be73780 1662static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1663{
1664 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1665 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1666}
1667
4be73780 1668static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1669{
1670 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1671
1672 /* When the VDD override bit is the last thing we disable, we have to
1673 * do the power cycle wait manually. */
1674 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1675 intel_dp->panel_power_cycle_delay);
1676
4be73780 1677 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1678}
1679
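/*
 * The backlight delays are enforced relative to the recorded
 * timestamps (last panel power on / last backlight off), so these
 * helpers only sleep for whatever part of the delay hasn't already
 * elapsed.
 */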
4be73780 1680static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1681{
1682 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1683 intel_dp->backlight_on_delay);
1684}
1685
4be73780 1686static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1687{
1688 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1689 intel_dp->backlight_off_delay);
1690}
99ea7127 1691
832dd3c1
KP
1692/* Read the current pp_control value, unlocking the register if it
1693 * is locked
1694 */
1695
453c5420 1696static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1697{
453c5420
JB
1698 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1699 struct drm_i915_private *dev_priv = dev->dev_private;
1700 u32 control;
832dd3c1 1701
e39b999a
VS
1702 lockdep_assert_held(&dev_priv->pps_mutex);
1703
bf13e81b 1704 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1705 control &= ~PANEL_UNLOCK_MASK;
1706 control |= PANEL_UNLOCK_REGS;
1707 return control;
bd943159
KP
1708}
1709
951468f3
VS
1710/*
1711 * Must be paired with edp_panel_vdd_off().
1712 * Must hold pps_mutex around the whole on/off sequence.
1713 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1714 */
1e0560e0 1715static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1716{
30add22d 1717 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1718 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1719 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1720 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1721 enum intel_display_power_domain power_domain;
5d613501 1722 u32 pp;
453c5420 1723 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1724 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1725
e39b999a
VS
1726 lockdep_assert_held(&dev_priv->pps_mutex);
1727
97af61f5 1728 if (!is_edp(intel_dp))
adddaaf4 1729 return false;
bd943159 1730
2c623c11 1731 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1732 intel_dp->want_panel_vdd = true;
99ea7127 1733
4be73780 1734 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1735 return need_to_disable;
b0665d57 1736
4e6e1a54
ID
1737 power_domain = intel_display_port_power_domain(intel_encoder);
1738 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1739
3936fcf4
VS
1740 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1741 port_name(intel_dig_port->port));
bd943159 1742
4be73780
DV
1743 if (!edp_have_panel_power(intel_dp))
1744 wait_panel_power_cycle(intel_dp);
99ea7127 1745
453c5420 1746 pp = ironlake_get_pp_control(intel_dp);
5d613501 1747 pp |= EDP_FORCE_VDD;
ebf33b18 1748
bf13e81b
JN
1749 pp_stat_reg = _pp_stat_reg(intel_dp);
1750 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1751
1752 I915_WRITE(pp_ctrl_reg, pp);
1753 POSTING_READ(pp_ctrl_reg);
1754 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1755 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1756 /*
1757 * If the panel wasn't on, delay before accessing aux channel
1758 */
4be73780 1759 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1760 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1761 port_name(intel_dig_port->port));
f01eca2e 1762 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1763 }
adddaaf4
JN
1764
1765 return need_to_disable;
1766}
1767
951468f3
VS
1768/*
1769 * Must be paired with intel_edp_panel_vdd_off() or
1770 * intel_edp_panel_off().
1771 * Nested calls to these functions are not allowed since
1772 * we drop the lock. Caller must use some higher level
1773 * locking to prevent nested calls from other threads.
1774 */
b80d6c78 1775void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1776{
c695b6b6 1777 bool vdd;
adddaaf4 1778
c695b6b6
VS
1779 if (!is_edp(intel_dp))
1780 return;
1781
773538e8 1782 pps_lock(intel_dp);
c695b6b6 1783 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1784 pps_unlock(intel_dp);
c695b6b6 1785
e2c719b7 1786 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1787 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1788}
1789
4be73780 1790static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1791{
30add22d 1792 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1793 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1794 struct intel_digital_port *intel_dig_port =
1795 dp_to_dig_port(intel_dp);
1796 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1797 enum intel_display_power_domain power_domain;
5d613501 1798 u32 pp;
453c5420 1799 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1800
e39b999a 1801 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1802
15e899a0 1803 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1804
15e899a0 1805 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1806 return;
b0665d57 1807
3936fcf4
VS
1808 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1809 port_name(intel_dig_port->port));
bd943159 1810
be2c9196
VS
1811 pp = ironlake_get_pp_control(intel_dp);
1812 pp &= ~EDP_FORCE_VDD;
453c5420 1813
be2c9196
VS
1814 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1815 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1816
be2c9196
VS
1817 I915_WRITE(pp_ctrl_reg, pp);
1818 POSTING_READ(pp_ctrl_reg);
90791a5c 1819
be2c9196
VS
1820 /* Make sure sequencer is idle before allowing subsequent activity */
1821 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1822 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1823
be2c9196
VS
1824 if ((pp & POWER_TARGET_ON) == 0)
1825 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1826
be2c9196
VS
1827 power_domain = intel_display_port_power_domain(intel_encoder);
1828 intel_display_power_put(dev_priv, power_domain);
bd943159 1829}
5d613501 1830
4be73780 1831static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1832{
1833 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1834 struct intel_dp, panel_vdd_work);
bd943159 1835
773538e8 1836 pps_lock(intel_dp);
15e899a0
VS
1837 if (!intel_dp->want_panel_vdd)
1838 edp_panel_vdd_off_sync(intel_dp);
773538e8 1839 pps_unlock(intel_dp);
bd943159
KP
1840}
1841
aba86890
ID
1842static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1843{
1844 unsigned long delay;
1845
1846 /*
1847 * Queue the timer to fire a long time from now (relative to the power
1848 * down delay) to keep the panel power up across a sequence of
1849 * operations.
1850 */
1851 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1852 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1853}
1854
951468f3
VS
1855/*
1856 * Must be paired with edp_panel_vdd_on().
1857 * Must hold pps_mutex around the whole on/off sequence.
1858 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1859 */
4be73780 1860static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1861{
e39b999a
VS
1862 struct drm_i915_private *dev_priv =
1863 intel_dp_to_dev(intel_dp)->dev_private;
1864
1865 lockdep_assert_held(&dev_priv->pps_mutex);
1866
97af61f5
KP
1867 if (!is_edp(intel_dp))
1868 return;
5d613501 1869
e2c719b7 1870 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1871 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1872
bd943159
KP
1873 intel_dp->want_panel_vdd = false;
1874
aba86890 1875 if (sync)
4be73780 1876 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1877 else
1878 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1879}
1880
9f0fb5be 1881static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1882{
30add22d 1883 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1884 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1885 u32 pp;
453c5420 1886 u32 pp_ctrl_reg;
9934c132 1887
9f0fb5be
VS
1888 lockdep_assert_held(&dev_priv->pps_mutex);
1889
97af61f5 1890 if (!is_edp(intel_dp))
bd943159 1891 return;
99ea7127 1892
3936fcf4
VS
1893 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1894 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1895
e7a89ace
VS
1896 if (WARN(edp_have_panel_power(intel_dp),
1897 "eDP port %c panel power already on\n",
1898 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1899 return;
9934c132 1900
4be73780 1901 wait_panel_power_cycle(intel_dp);
37c6c9b0 1902
bf13e81b 1903 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1904 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1905 if (IS_GEN5(dev)) {
1906 /* ILK workaround: disable reset around power sequence */
1907 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1908 I915_WRITE(pp_ctrl_reg, pp);
1909 POSTING_READ(pp_ctrl_reg);
05ce1a49 1910 }
37c6c9b0 1911
1c0ae80a 1912 pp |= POWER_TARGET_ON;
99ea7127
KP
1913 if (!IS_GEN5(dev))
1914 pp |= PANEL_POWER_RESET;
1915
453c5420
JB
1916 I915_WRITE(pp_ctrl_reg, pp);
1917 POSTING_READ(pp_ctrl_reg);
9934c132 1918
4be73780 1919 wait_panel_on(intel_dp);
dce56b3c 1920 intel_dp->last_power_on = jiffies;
9934c132 1921
05ce1a49
KP
1922 if (IS_GEN5(dev)) {
1923 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1924 I915_WRITE(pp_ctrl_reg, pp);
1925 POSTING_READ(pp_ctrl_reg);
05ce1a49 1926 }
9f0fb5be 1927}
e39b999a 1928
9f0fb5be
VS
1929void intel_edp_panel_on(struct intel_dp *intel_dp)
1930{
1931 if (!is_edp(intel_dp))
1932 return;
1933
1934 pps_lock(intel_dp);
1935 edp_panel_on(intel_dp);
773538e8 1936 pps_unlock(intel_dp);
9934c132
JB
1937}
1938
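/*
 * Locking convention for the panel power helpers: the public
 * intel_edp_*() entry points take pps_lock() themselves, while the
 * lower-case edp_*() helpers expect pps_mutex to already be held and
 * assert that via lockdep_assert_held().
 */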
9f0fb5be
VS
1939
1940static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1941{
4e6e1a54
ID
1942 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1943 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1945 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1946 enum intel_display_power_domain power_domain;
99ea7127 1947 u32 pp;
453c5420 1948 u32 pp_ctrl_reg;
9934c132 1949
9f0fb5be
VS
1950 lockdep_assert_held(&dev_priv->pps_mutex);
1951
97af61f5
KP
1952 if (!is_edp(intel_dp))
1953 return;
37c6c9b0 1954
3936fcf4
VS
1955 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1956 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1957
3936fcf4
VS
1958 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1959 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1960
453c5420 1961 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1962 /* We need to switch off panel power _and_ force vdd, because otherwise
1963 * some panels get very unhappy and cease to work. */
b3064154
PJ
1964 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1965 EDP_BLC_ENABLE);
453c5420 1966
bf13e81b 1967 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1968
849e39f5
PZ
1969 intel_dp->want_panel_vdd = false;
1970
453c5420
JB
1971 I915_WRITE(pp_ctrl_reg, pp);
1972 POSTING_READ(pp_ctrl_reg);
9934c132 1973
dce56b3c 1974 intel_dp->last_power_cycle = jiffies;
4be73780 1975 wait_panel_off(intel_dp);
849e39f5
PZ
1976
1977 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1978 power_domain = intel_display_port_power_domain(intel_encoder);
1979 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1980}
e39b999a 1981
9f0fb5be
VS
1982void intel_edp_panel_off(struct intel_dp *intel_dp)
1983{
1984 if (!is_edp(intel_dp))
1985 return;
e39b999a 1986
9f0fb5be
VS
1987 pps_lock(intel_dp);
1988 edp_panel_off(intel_dp);
773538e8 1989 pps_unlock(intel_dp);
9934c132
JB
1990}
1991
1250d107
JN
1992/* Enable backlight in the panel power control. */
1993static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1994{
da63a9f2
PZ
1995 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1996 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1997 struct drm_i915_private *dev_priv = dev->dev_private;
1998 u32 pp;
453c5420 1999 u32 pp_ctrl_reg;
32f9d658 2000
01cb9ea6
JB
2001 /*
2002 * If we enable the backlight right away following a panel power
2003 * on, we may see slight flicker as the panel syncs with the eDP
2004 * link. So delay a bit to make sure the image is solid before
2005 * allowing it to appear.
2006 */
4be73780 2007 wait_backlight_on(intel_dp);
e39b999a 2008
773538e8 2009 pps_lock(intel_dp);
e39b999a 2010
453c5420 2011 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2012 pp |= EDP_BLC_ENABLE;
453c5420 2013
bf13e81b 2014 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2015
2016 I915_WRITE(pp_ctrl_reg, pp);
2017 POSTING_READ(pp_ctrl_reg);
e39b999a 2018
773538e8 2019 pps_unlock(intel_dp);
32f9d658
ZW
2020}
2021
1250d107
JN
2022/* Enable backlight PWM and backlight PP control. */
2023void intel_edp_backlight_on(struct intel_dp *intel_dp)
2024{
2025 if (!is_edp(intel_dp))
2026 return;
2027
2028 DRM_DEBUG_KMS("\n");
2029
2030 intel_panel_enable_backlight(intel_dp->attached_connector);
2031 _intel_edp_backlight_on(intel_dp);
2032}
2033
2034/* Disable backlight in the panel power control. */
2035static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2036{
30add22d 2037 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 u32 pp;
453c5420 2040 u32 pp_ctrl_reg;
32f9d658 2041
f01eca2e
KP
2042 if (!is_edp(intel_dp))
2043 return;
2044
773538e8 2045 pps_lock(intel_dp);
e39b999a 2046
453c5420 2047 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2048 pp &= ~EDP_BLC_ENABLE;
453c5420 2049
bf13e81b 2050 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2051
2052 I915_WRITE(pp_ctrl_reg, pp);
2053 POSTING_READ(pp_ctrl_reg);
f7d2323c 2054
773538e8 2055 pps_unlock(intel_dp);
e39b999a
VS
2056
2057 intel_dp->last_backlight_off = jiffies;
f7d2323c 2058 edp_wait_backlight_off(intel_dp);
1250d107 2059}
f7d2323c 2060
1250d107
JN
2061/* Disable backlight PP control and backlight PWM. */
2062void intel_edp_backlight_off(struct intel_dp *intel_dp)
2063{
2064 if (!is_edp(intel_dp))
2065 return;
2066
2067 DRM_DEBUG_KMS("\n");
f7d2323c 2068
1250d107 2069 _intel_edp_backlight_off(intel_dp);
f7d2323c 2070 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2071}
a4fc5ed6 2072
73580fb7
JN
2073/*
2074 * Hook for controlling the panel power control backlight through the bl_power
2075 * sysfs attribute. Take care to handle multiple calls.
2076 */
2077static void intel_edp_backlight_power(struct intel_connector *connector,
2078 bool enable)
2079{
2080 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2081 bool is_enabled;
2082
773538e8 2083 pps_lock(intel_dp);
e39b999a 2084 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2085 pps_unlock(intel_dp);
73580fb7
JN
2086
2087 if (is_enabled == enable)
2088 return;
2089
23ba9373
JN
2090 DRM_DEBUG_KMS("panel power control backlight %s\n",
2091 enable ? "enable" : "disable");
73580fb7
JN
2092
2093 if (enable)
2094 _intel_edp_backlight_on(intel_dp);
2095 else
2096 _intel_edp_backlight_off(intel_dp);
2097}
2098
2bd2ad64 2099static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2100{
da63a9f2
PZ
2101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2102 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2103 struct drm_device *dev = crtc->dev;
d240f20f
JB
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 dpa_ctl;
2106
2bd2ad64
DV
2107 assert_pipe_disabled(dev_priv,
2108 to_intel_crtc(crtc)->pipe);
2109
d240f20f
JB
2110 DRM_DEBUG_KMS("\n");
2111 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2112 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2113 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2114
2115 /* We don't adjust intel_dp->DP while tearing down the link, to
2116 * facilitate link retraining (e.g. after hotplug). Hence clear all
2117 * enable bits here to ensure that we don't enable too much. */
2118 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2119 intel_dp->DP |= DP_PLL_ENABLE;
2120 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2121 POSTING_READ(DP_A);
2122 udelay(200);
d240f20f
JB
2123}
2124
2bd2ad64 2125static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2126{
da63a9f2
PZ
2127 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2128 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2129 struct drm_device *dev = crtc->dev;
d240f20f
JB
2130 struct drm_i915_private *dev_priv = dev->dev_private;
2131 u32 dpa_ctl;
2132
2bd2ad64
DV
2133 assert_pipe_disabled(dev_priv,
2134 to_intel_crtc(crtc)->pipe);
2135
d240f20f 2136 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2137 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2138 "dp pll off, should be on\n");
2139 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2140
2141 /* We can't rely on the value tracked for the DP register in
2142 * intel_dp->DP because link_down must not change that (otherwise link
2143 * re-training will fail). */
298b0b39 2144 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2145 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2146 POSTING_READ(DP_A);
d240f20f
JB
2147 udelay(200);
2148}
2149
c7ad3810 2150/* If the sink supports it, try to set the power state appropriately */
c19b0669 2151void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2152{
2153 int ret, i;
2154
2155 /* Should have a valid DPCD by this point */
2156 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2157 return;
2158
2159 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2160 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2161 DP_SET_POWER_D3);
c7ad3810
JB
2162 } else {
2163 /*
2164 * When turning on, we need to retry for 1ms to give the sink
2165 * time to wake up.
2166 */
2167 for (i = 0; i < 3; i++) {
9d1a1031
JN
2168 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2169 DP_SET_POWER_D0);
c7ad3810
JB
2170 if (ret == 1)
2171 break;
2172 msleep(1);
2173 }
2174 }
f9cac721
JN
2175
2176 if (ret != 1)
2177 DRM_DEBUG_KMS("failed to %s sink power state\n",
2178 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2179}
2180
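/*
 * Read out whether the port is enabled and, if so, which pipe it is
 * attached to. The pipe lookup differs per platform: gen7 port A uses
 * the CPT-style pipe select bits, CPT PCH ports are found by scanning
 * TRANS_DP_CTL for a matching port, and CHV/older parts read the pipe
 * select bits straight from the port register.
 */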
19d8fe15
DV
2181static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2182 enum pipe *pipe)
d240f20f 2183{
19d8fe15 2184 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2185 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2186 struct drm_device *dev = encoder->base.dev;
2187 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2188 enum intel_display_power_domain power_domain;
2189 u32 tmp;
2190
2191 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2192 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2193 return false;
2194
2195 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2196
2197 if (!(tmp & DP_PORT_EN))
2198 return false;
2199
39e5fa88 2200 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2201 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2202 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2203 enum pipe p;
19d8fe15 2204
adc289d7
VS
2205 for_each_pipe(dev_priv, p) {
2206 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2207 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2208 *pipe = p;
19d8fe15
DV
2209 return true;
2210 }
2211 }
19d8fe15 2212
4a0833ec
DV
2213 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2214 intel_dp->output_reg);
39e5fa88
VS
2215 } else if (IS_CHERRYVIEW(dev)) {
2216 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2217 } else {
2218 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2219 }
d240f20f 2220
19d8fe15
DV
2221 return true;
2222}
d240f20f 2223
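/*
 * Read the current hardware state back into pipe_config: audio enable,
 * sync polarity flags (from TRANS_DP_CTL on CPT, otherwise from the
 * port register), limited color range, link M/N values, the port clock
 * for port A and the resulting dotclock.
 */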
045ac3b5 2224static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2225 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2226{
2227 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2228 u32 tmp, flags = 0;
63000ef6
XZ
2229 struct drm_device *dev = encoder->base.dev;
2230 struct drm_i915_private *dev_priv = dev->dev_private;
2231 enum port port = dp_to_dig_port(intel_dp)->port;
2232 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2233 int dotclock;
045ac3b5 2234
9ed109a7 2235 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2236
2237 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2238
39e5fa88
VS
2239 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2240 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2241 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2242 flags |= DRM_MODE_FLAG_PHSYNC;
2243 else
2244 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2245
39e5fa88 2246 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2247 flags |= DRM_MODE_FLAG_PVSYNC;
2248 else
2249 flags |= DRM_MODE_FLAG_NVSYNC;
2250 } else {
39e5fa88 2251 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2252 flags |= DRM_MODE_FLAG_PHSYNC;
2253 else
2254 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2255
39e5fa88 2256 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2257 flags |= DRM_MODE_FLAG_PVSYNC;
2258 else
2259 flags |= DRM_MODE_FLAG_NVSYNC;
2260 }
045ac3b5 2261
2d112de7 2262 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2263
8c875fca
VS
2264 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2265 tmp & DP_COLOR_RANGE_16_235)
2266 pipe_config->limited_color_range = true;
2267
eb14cb74
VS
2268 pipe_config->has_dp_encoder = true;
2269
2270 intel_dp_get_m_n(crtc, pipe_config);
2271
18442d08 2272 if (port == PORT_A) {
f1f644dc
JB
2273 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2274 pipe_config->port_clock = 162000;
2275 else
2276 pipe_config->port_clock = 270000;
2277 }
18442d08
VS
2278
2279 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2280 &pipe_config->dp_m_n);
2281
2282 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2283 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2284
2d112de7 2285 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2286
c6cd2ee2
JN
2287 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2288 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2289 /*
2290 * This is a big fat ugly hack.
2291 *
2292 * Some machines in UEFI boot mode provide us a VBT that has 18
2293 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2294 * unknown we fail to light up. Yet the same BIOS boots up with
2295 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2296 * max, not what it tells us to use.
2297 *
2298 * Note: This will still be broken if the eDP panel is not lit
2299 * up by the BIOS, and thus we can't get the mode at module
2300 * load.
2301 */
2302 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2303 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2304 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2305 }
045ac3b5
JB
2306}
2307
e8cb4558 2308static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2309{
e8cb4558 2310 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2311 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2312 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2313
6e3c9717 2314 if (crtc->config->has_audio)
495a5bb8 2315 intel_audio_codec_disable(encoder);
6cb49835 2316
b32c6f48
RV
2317 if (HAS_PSR(dev) && !HAS_DDI(dev))
2318 intel_psr_disable(intel_dp);
2319
6cb49835
DV
2320 /* Make sure the panel is off before trying to change the mode. But also
2321 * ensure that we have vdd while we switch off the panel. */
24f3e092 2322 intel_edp_panel_vdd_on(intel_dp);
4be73780 2323 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2324 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2325 intel_edp_panel_off(intel_dp);
3739850b 2326
08aff3fe
VS
2327 /* disable the port before the pipe on g4x */
2328 if (INTEL_INFO(dev)->gen < 5)
3739850b 2329 intel_dp_link_down(intel_dp);
d240f20f
JB
2330}
2331
08aff3fe 2332static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2333{
2bd2ad64 2334 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2335 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2336
49277c31 2337 intel_dp_link_down(intel_dp);
08aff3fe
VS
2338 if (port == PORT_A)
2339 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2340}
2341
2342static void vlv_post_disable_dp(struct intel_encoder *encoder)
2343{
2344 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2345
2346 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2347}
2348
580d3811
VS
2349static void chv_post_disable_dp(struct intel_encoder *encoder)
2350{
2351 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2352 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2353 struct drm_device *dev = encoder->base.dev;
2354 struct drm_i915_private *dev_priv = dev->dev_private;
2355 struct intel_crtc *intel_crtc =
2356 to_intel_crtc(encoder->base.crtc);
2357 enum dpio_channel ch = vlv_dport_to_channel(dport);
2358 enum pipe pipe = intel_crtc->pipe;
2359 u32 val;
2360
2361 intel_dp_link_down(intel_dp);
2362
a580516d 2363 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2364
2365 /* Propagate soft reset to data lane reset */
97fd4d5c 2366 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2367 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2368 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2369
97fd4d5c
VS
2370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2371 val |= CHV_PCS_REQ_SOFTRESET_EN;
2372 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2373
2374 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2375 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2376 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2377
2378 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2379 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2380 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2381
a580516d 2382 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2383}
2384
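/*
 * Program the requested link training pattern into the port. Three
 * register layouts are handled: DP_TP_CTL on DDI platforms, the
 * CPT-style link train bits for gen7 port A and CPT PCH ports, and the
 * legacy bits in the DP register otherwise. Training pattern 3 is only
 * available on DDI and CHV; other platforms fall back to pattern 2
 * with an error.
 */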
7b13b58a
VS
2385static void
2386_intel_dp_set_link_train(struct intel_dp *intel_dp,
2387 uint32_t *DP,
2388 uint8_t dp_train_pat)
2389{
2390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2391 struct drm_device *dev = intel_dig_port->base.base.dev;
2392 struct drm_i915_private *dev_priv = dev->dev_private;
2393 enum port port = intel_dig_port->port;
2394
2395 if (HAS_DDI(dev)) {
2396 uint32_t temp = I915_READ(DP_TP_CTL(port));
2397
2398 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2399 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2400 else
2401 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2402
2403 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2404 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2405 case DP_TRAINING_PATTERN_DISABLE:
2406 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2407
2408 break;
2409 case DP_TRAINING_PATTERN_1:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2411 break;
2412 case DP_TRAINING_PATTERN_2:
2413 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2414 break;
2415 case DP_TRAINING_PATTERN_3:
2416 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2417 break;
2418 }
2419 I915_WRITE(DP_TP_CTL(port), temp);
2420
39e5fa88
VS
2421 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2422 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2423 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2424
2425 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2426 case DP_TRAINING_PATTERN_DISABLE:
2427 *DP |= DP_LINK_TRAIN_OFF_CPT;
2428 break;
2429 case DP_TRAINING_PATTERN_1:
2430 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2431 break;
2432 case DP_TRAINING_PATTERN_2:
2433 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2434 break;
2435 case DP_TRAINING_PATTERN_3:
2436 DRM_ERROR("DP training pattern 3 not supported\n");
2437 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2438 break;
2439 }
2440
2441 } else {
2442 if (IS_CHERRYVIEW(dev))
2443 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2444 else
2445 *DP &= ~DP_LINK_TRAIN_MASK;
2446
2447 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2448 case DP_TRAINING_PATTERN_DISABLE:
2449 *DP |= DP_LINK_TRAIN_OFF;
2450 break;
2451 case DP_TRAINING_PATTERN_1:
2452 *DP |= DP_LINK_TRAIN_PAT_1;
2453 break;
2454 case DP_TRAINING_PATTERN_2:
2455 *DP |= DP_LINK_TRAIN_PAT_2;
2456 break;
2457 case DP_TRAINING_PATTERN_3:
2458 if (IS_CHERRYVIEW(dev)) {
2459 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2460 } else {
2461 DRM_ERROR("DP training pattern 3 not supported\n");
2462 *DP |= DP_LINK_TRAIN_PAT_2;
2463 }
2464 break;
2465 }
2466 }
2467}
2468
2469static void intel_dp_enable_port(struct intel_dp *intel_dp)
2470{
2471 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473
7b13b58a
VS
2474 /* enable with pattern 1 (as per spec) */
2475 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2476 DP_TRAINING_PATTERN_1);
2477
2478 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2479 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2480
2481 /*
2482 * Magic for VLV/CHV. We _must_ first set up the register
2483 * without actually enabling the port, and then do another
2484 * write to enable the port. Otherwise link training will
2485 * fail when the power sequencer is freshly used for this port.
2486 */
2487 intel_dp->DP |= DP_PORT_EN;
2488
2489 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2490 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2491}
2492
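/*
 * Full enable sequence: under pps_lock, bind the VLV power sequencer
 * to this pipe if needed, enable the port with training pattern 1 and
 * bring up the eDP panel (VDD on, panel power on, VDD off). Then wait
 * for the VLV/CHV PHY lanes to become ready, wake the sink, run link
 * training, and finally enable audio if the mode carries it.
 */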
e8cb4558 2493static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2494{
e8cb4558
DV
2495 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2496 struct drm_device *dev = encoder->base.dev;
2497 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2498 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2499 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2500 unsigned int lane_mask = 0x0;
5d613501 2501
0c33d8d7
DV
2502 if (WARN_ON(dp_reg & DP_PORT_EN))
2503 return;
5d613501 2504
093e3f13
VS
2505 pps_lock(intel_dp);
2506
2507 if (IS_VALLEYVIEW(dev))
2508 vlv_init_panel_power_sequencer(intel_dp);
2509
7b13b58a 2510 intel_dp_enable_port(intel_dp);
093e3f13
VS
2511
2512 edp_panel_vdd_on(intel_dp);
2513 edp_panel_on(intel_dp);
2514 edp_panel_vdd_off(intel_dp, true);
2515
2516 pps_unlock(intel_dp);
2517
61234fa5 2518 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2519 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2520 lane_mask);
61234fa5 2521
f01eca2e 2522 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2523 intel_dp_start_link_train(intel_dp);
33a34e4e 2524 intel_dp_complete_link_train(intel_dp);
3ab9c637 2525 intel_dp_stop_link_train(intel_dp);
c1dec79a 2526
6e3c9717 2527 if (crtc->config->has_audio) {
c1dec79a
JN
2528 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2529 pipe_name(crtc->pipe));
2530 intel_audio_codec_enable(encoder);
2531 }
ab1f90f9 2532}
89b667f8 2533
ecff4f3b
JN
2534static void g4x_enable_dp(struct intel_encoder *encoder)
2535{
828f5c6e
JN
2536 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2537
ecff4f3b 2538 intel_enable_dp(encoder);
4be73780 2539 intel_edp_backlight_on(intel_dp);
ab1f90f9 2540}
89b667f8 2541
ab1f90f9
JN
2542static void vlv_enable_dp(struct intel_encoder *encoder)
2543{
828f5c6e
JN
2544 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2545
4be73780 2546 intel_edp_backlight_on(intel_dp);
b32c6f48 2547 intel_psr_enable(intel_dp);
d240f20f
JB
2548}
2549
ecff4f3b 2550static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2551{
2552 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2553 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2554
8ac33ed3
DV
2555 intel_dp_prepare(encoder);
2556
d41f1efb
DV
2557 /* Only ilk+ has port A */
2558 if (dport->port == PORT_A) {
2559 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2560 ironlake_edp_pll_on(intel_dp);
d41f1efb 2561 }
ab1f90f9
JN
2562}
2563
83b84597
VS
2564static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2565{
2566 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2567 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2568 enum pipe pipe = intel_dp->pps_pipe;
2569 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2570
2571 edp_panel_vdd_off_sync(intel_dp);
2572
2573 /*
2574 * VLV seems to get confused when multiple power sequencers
2575 * have the same port selected (even if only one has power/vdd
2576 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2577 * CHV on the other hand doesn't seem to mind having the same port
2578 * selected in multiple power sequencers, but let's always clear the
2579 * port select when logically disconnecting a power sequencer
2580 * from a port.
2581 */
2582 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2583 pipe_name(pipe), port_name(intel_dig_port->port));
2584 I915_WRITE(pp_on_reg, 0);
2585 POSTING_READ(pp_on_reg);
2586
2587 intel_dp->pps_pipe = INVALID_PIPE;
2588}
2589
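/*
 * If another eDP port currently owns the power sequencer for this
 * pipe, take it away (switching its VDD off first) so that the caller
 * can claim it.
 */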
a4a5d2f8
VS
2590static void vlv_steal_power_sequencer(struct drm_device *dev,
2591 enum pipe pipe)
2592{
2593 struct drm_i915_private *dev_priv = dev->dev_private;
2594 struct intel_encoder *encoder;
2595
2596 lockdep_assert_held(&dev_priv->pps_mutex);
2597
ac3c12e4
VS
2598 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2599 return;
2600
a4a5d2f8
VS
2601 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2602 base.head) {
2603 struct intel_dp *intel_dp;
773538e8 2604 enum port port;
a4a5d2f8
VS
2605
2606 if (encoder->type != INTEL_OUTPUT_EDP)
2607 continue;
2608
2609 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2610 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2611
2612 if (intel_dp->pps_pipe != pipe)
2613 continue;
2614
2615 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2616 pipe_name(pipe), port_name(port));
a4a5d2f8 2617
034e43c6
VS
2618 WARN(encoder->connectors_active,
2619 "stealing pipe %c power sequencer from active eDP port %c\n",
2620 pipe_name(pipe), port_name(port));
a4a5d2f8 2621
a4a5d2f8 2622 /* make sure vdd is off before we steal it */
83b84597 2623 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2624 }
2625}
2626
2627static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2628{
2629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2630 struct intel_encoder *encoder = &intel_dig_port->base;
2631 struct drm_device *dev = encoder->base.dev;
2632 struct drm_i915_private *dev_priv = dev->dev_private;
2633 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2634
2635 lockdep_assert_held(&dev_priv->pps_mutex);
2636
093e3f13
VS
2637 if (!is_edp(intel_dp))
2638 return;
2639
a4a5d2f8
VS
2640 if (intel_dp->pps_pipe == crtc->pipe)
2641 return;
2642
2643 /*
2644 * If another power sequencer was being used on this
2645 * port previously make sure to turn off vdd there while
2646 * we still have control of it.
2647 */
2648 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2649 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2650
2651 /*
2652 * We may be stealing the power
2653 * sequencer from another port.
2654 */
2655 vlv_steal_power_sequencer(dev, crtc->pipe);
2656
2657 /* now it's all ours */
2658 intel_dp->pps_pipe = crtc->pipe;
2659
2660 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2661 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2662
2663 /* init power sequencer on this pipe and port */
36b5f425
VS
2664 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2665 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2666}
2667
ab1f90f9 2668static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2669{
2bd2ad64 2670 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2671 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2672 struct drm_device *dev = encoder->base.dev;
89b667f8 2673 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2674 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2675 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2676 int pipe = intel_crtc->pipe;
2677 u32 val;
a4fc5ed6 2678
a580516d 2679 mutex_lock(&dev_priv->sb_lock);
89b667f8 2680
ab3c759a 2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2682 val = 0;
2683 if (pipe)
2684 val |= (1<<21);
2685 else
2686 val &= ~(1<<21);
2687 val |= 0x001000c4;
ab3c759a
CML
2688 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2690 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2691
a580516d 2692 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2693
2694 intel_enable_dp(encoder);
89b667f8
JB
2695}
2696
ecff4f3b 2697static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2698{
2699 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2702 struct intel_crtc *intel_crtc =
2703 to_intel_crtc(encoder->base.crtc);
e4607fcf 2704 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2705 int pipe = intel_crtc->pipe;
89b667f8 2706
8ac33ed3
DV
2707 intel_dp_prepare(encoder);
2708
89b667f8 2709 /* Program Tx lane resets to default */
a580516d 2710 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2711 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2712 DPIO_PCS_TX_LANE2_RESET |
2713 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2714 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2715 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2716 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2717 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2718 DPIO_PCS_CLK_SOFT_RESET);
2719
2720 /* Fix up inter-pair skew failure */
ab3c759a
CML
2721 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2722 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2723 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2724 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2725}
2726
e4a1d846
CML
2727static void chv_pre_enable_dp(struct intel_encoder *encoder)
2728{
2729 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2730 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2731 struct drm_device *dev = encoder->base.dev;
2732 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2733 struct intel_crtc *intel_crtc =
2734 to_intel_crtc(encoder->base.crtc);
2735 enum dpio_channel ch = vlv_dport_to_channel(dport);
2736 int pipe = intel_crtc->pipe;
2e523e98 2737 int data, i, stagger;
949c1d43 2738 u32 val;
e4a1d846 2739
a580516d 2740 mutex_lock(&dev_priv->sb_lock);
949c1d43 2741
570e2a74
VS
2742 /* allow hardware to manage TX FIFO reset source */
2743 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2744 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2745 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2746
2747 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2748 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2749 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2750
949c1d43 2751 /* Deassert soft data lane reset */
97fd4d5c 2752 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2753 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2755
2756 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2757 val |= CHV_PCS_REQ_SOFTRESET_EN;
2758 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2759
2760 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2761 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2762 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2763
97fd4d5c 2764 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2765 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2766 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2767
2768 /* Program Tx lane latency optimal setting */
e4a1d846 2769 for (i = 0; i < 4; i++) {
e4a1d846
CML
2770 /* Set the upar bit */
2771 data = (i == 1) ? 0x0 : 0x1;
2772 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2773 data << DPIO_UPAR_SHIFT);
2774 }
2775
2776 /* Data lane stagger programming */
2e523e98
VS
2777 if (intel_crtc->config->port_clock > 270000)
2778 stagger = 0x18;
2779 else if (intel_crtc->config->port_clock > 135000)
2780 stagger = 0xd;
2781 else if (intel_crtc->config->port_clock > 67500)
2782 stagger = 0x7;
2783 else if (intel_crtc->config->port_clock > 33750)
2784 stagger = 0x4;
2785 else
2786 stagger = 0x2;
2787
2788 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2789 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2790 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2791
2792 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2793 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2794 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2795
2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2797 DPIO_LANESTAGGER_STRAP(stagger) |
2798 DPIO_LANESTAGGER_STRAP_OVRD |
2799 DPIO_TX1_STAGGER_MASK(0x1f) |
2800 DPIO_TX1_STAGGER_MULT(6) |
2801 DPIO_TX2_STAGGER_MULT(0));
2802
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2804 DPIO_LANESTAGGER_STRAP(stagger) |
2805 DPIO_LANESTAGGER_STRAP_OVRD |
2806 DPIO_TX1_STAGGER_MASK(0x1f) |
2807 DPIO_TX1_STAGGER_MULT(7) |
2808 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2809
a580516d 2810 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2811
e4a1d846 2812 intel_enable_dp(encoder);
e4a1d846
CML
2813}
2814
9197c88b
VS
2815static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2816{
2817 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2818 struct drm_device *dev = encoder->base.dev;
2819 struct drm_i915_private *dev_priv = dev->dev_private;
2820 struct intel_crtc *intel_crtc =
2821 to_intel_crtc(encoder->base.crtc);
2822 enum dpio_channel ch = vlv_dport_to_channel(dport);
2823 enum pipe pipe = intel_crtc->pipe;
2824 u32 val;
2825
625695f8
VS
2826 intel_dp_prepare(encoder);
2827
a580516d 2828 mutex_lock(&dev_priv->sb_lock);
9197c88b 2829
b9e5ac3c
VS
2830 /* program left/right clock distribution */
2831 if (pipe != PIPE_B) {
2832 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2833 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2834 if (ch == DPIO_CH0)
2835 val |= CHV_BUFLEFTENA1_FORCE;
2836 if (ch == DPIO_CH1)
2837 val |= CHV_BUFRIGHTENA1_FORCE;
2838 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2839 } else {
2840 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2841 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2842 if (ch == DPIO_CH0)
2843 val |= CHV_BUFLEFTENA2_FORCE;
2844 if (ch == DPIO_CH1)
2845 val |= CHV_BUFRIGHTENA2_FORCE;
2846 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2847 }
2848
9197c88b
VS
2849 /* program clock channel usage */
2850 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2851 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2852 if (pipe != PIPE_B)
2853 val &= ~CHV_PCS_USEDCLKCHANNEL;
2854 else
2855 val |= CHV_PCS_USEDCLKCHANNEL;
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2859 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2860 if (pipe != PIPE_B)
2861 val &= ~CHV_PCS_USEDCLKCHANNEL;
2862 else
2863 val |= CHV_PCS_USEDCLKCHANNEL;
2864 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2865
2866 /*
2867 * This is a bit weird since generally CL
2868 * matches the pipe, but here we need to
2869 * pick the CL based on the port.
2870 */
2871 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2872 if (pipe != PIPE_B)
2873 val &= ~CHV_CMN_USEDCLKCHANNEL;
2874 else
2875 val |= CHV_CMN_USEDCLKCHANNEL;
2876 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2877
a580516d 2878 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2879}
2880
a4fc5ed6 2881/*
df0c237d
JB
2882 * Native read with retry for link status and receiver capability reads for
2883 * cases where the sink may still be asleep.
9d1a1031
JN
2884 *
2885 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2886 * supposed to retry 3 times per the spec.
a4fc5ed6 2887 */
9d1a1031
JN
2888static ssize_t
2889intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2890 void *buffer, size_t size)
a4fc5ed6 2891{
9d1a1031
JN
2892 ssize_t ret;
2893 int i;
61da5fab 2894
f6a19066
VS
2895 /*
2896 * Sometimes we just get the same incorrect byte repeated
2897 * over the entire buffer. Doing just one throw-away read
2898 * initially seems to "solve" it.
2899 */
2900 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2901
61da5fab 2902 for (i = 0; i < 3; i++) {
9d1a1031
JN
2903 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2904 if (ret == size)
2905 return ret;
61da5fab
JB
2906 msleep(1);
2907 }
a4fc5ed6 2908
9d1a1031 2909 return ret;
a4fc5ed6
KP
2910}
2911
2912/*
2913 * Fetch AUX CH registers 0x202 - 0x207 which contain
2914 * link status information
2915 */
2916static bool
93f62dad 2917intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2918{
9d1a1031
JN
2919 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2920 DP_LANE0_1_STATUS,
2921 link_status,
2922 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2923}
2924
1100244e 2925/* These are source-specific values. */
a4fc5ed6 2926static uint8_t
1a2eb460 2927intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2928{
30add22d 2929 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2930 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2931 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2932
9314726b
VK
2933 if (IS_BROXTON(dev))
2934 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2935 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2936 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2937 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2938 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2939 } else if (IS_VALLEYVIEW(dev))
bd60018a 2940 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2941 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2942 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2943 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2944 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2945 else
bd60018a 2946 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2947}
2948
2949static uint8_t
2950intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2951{
30add22d 2952 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2953 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2954
5a9d1f1a
DL
2955 if (INTEL_INFO(dev)->gen >= 9) {
2956 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2958 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2959 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2960 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2961 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2962 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2964 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2965 default:
2966 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2967 }
2968 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2969 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2971 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2972 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2975 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2977 default:
bd60018a 2978 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2979 }
e2fa6fba
P
2980 } else if (IS_VALLEYVIEW(dev)) {
2981 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2983 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2985 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2986 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2987 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2989 default:
bd60018a 2990 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2991 }
bc7d38a4 2992 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2993 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2994 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2995 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2997 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2998 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2999 default:
bd60018a 3000 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3001 }
3002 } else {
3003 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3005 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3007 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3009 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3011 default:
bd60018a 3012 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3013 }
a4fc5ed6
KP
3014 }
3015}
3016
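/*
 * Translate the voltage swing / pre-emphasis combination requested in
 * intel_dp->train_set[0] into VLV PHY settings and program them
 * through the DPIO sideband.
 */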
5829975c 3017static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3018{
3019 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3020 struct drm_i915_private *dev_priv = dev->dev_private;
3021 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3022 struct intel_crtc *intel_crtc =
3023 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3024 unsigned long demph_reg_value, preemph_reg_value,
3025 uniqtranscale_reg_value;
3026 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3027 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3028 int pipe = intel_crtc->pipe;
e2fa6fba
P
3029
3030 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3031 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3032 preemph_reg_value = 0x0004000;
3033 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3034 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3035 demph_reg_value = 0x2B405555;
3036 uniqtranscale_reg_value = 0x552AB83A;
3037 break;
bd60018a 3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3039 demph_reg_value = 0x2B404040;
3040 uniqtranscale_reg_value = 0x5548B83A;
3041 break;
bd60018a 3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3043 demph_reg_value = 0x2B245555;
3044 uniqtranscale_reg_value = 0x5560B83A;
3045 break;
bd60018a 3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3047 demph_reg_value = 0x2B405555;
3048 uniqtranscale_reg_value = 0x5598DA3A;
3049 break;
3050 default:
3051 return 0;
3052 }
3053 break;
bd60018a 3054 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3055 preemph_reg_value = 0x0002000;
3056 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3058 demph_reg_value = 0x2B404040;
3059 uniqtranscale_reg_value = 0x5552B83A;
3060 break;
bd60018a 3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3062 demph_reg_value = 0x2B404848;
3063 uniqtranscale_reg_value = 0x5580B83A;
3064 break;
bd60018a 3065 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3066 demph_reg_value = 0x2B404040;
3067 uniqtranscale_reg_value = 0x55ADDA3A;
3068 break;
3069 default:
3070 return 0;
3071 }
3072 break;
bd60018a 3073 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3074 preemph_reg_value = 0x0000000;
3075 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3077 demph_reg_value = 0x2B305555;
3078 uniqtranscale_reg_value = 0x5570B83A;
3079 break;
bd60018a 3080 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3081 demph_reg_value = 0x2B2B4040;
3082 uniqtranscale_reg_value = 0x55ADDA3A;
3083 break;
3084 default:
3085 return 0;
3086 }
3087 break;
bd60018a 3088 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3089 preemph_reg_value = 0x0006000;
3090 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3092 demph_reg_value = 0x1B405555;
3093 uniqtranscale_reg_value = 0x55ADDA3A;
3094 break;
3095 default:
3096 return 0;
3097 }
3098 break;
3099 default:
3100 return 0;
3101 }
3102
a580516d 3103 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3104 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3105 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3106 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3107 uniqtranscale_reg_value);
ab3c759a
CML
3108 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3109 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3110 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3111 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3112 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3113
3114 return 0;
3115}
3116
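/*
 * CHV equivalent: pick de-emphasis and margin values for the requested
 * swing/pre-emphasis level and program the per-lane DPIO registers,
 * then kick off the swing calculation.
 */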
5829975c 3117static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3118{
3119 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3120 struct drm_i915_private *dev_priv = dev->dev_private;
3121 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3122 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3123 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3124 uint8_t train_set = intel_dp->train_set[0];
3125 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3126 enum pipe pipe = intel_crtc->pipe;
3127 int i;
e4a1d846
CML
3128
3129 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3130 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3131 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3133 deemph_reg_value = 128;
3134 margin_reg_value = 52;
3135 break;
bd60018a 3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3137 deemph_reg_value = 128;
3138 margin_reg_value = 77;
3139 break;
bd60018a 3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3141 deemph_reg_value = 128;
3142 margin_reg_value = 102;
3143 break;
bd60018a 3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3145 deemph_reg_value = 128;
3146 margin_reg_value = 154;
3147 /* FIXME extra to set for 1200 */
3148 break;
3149 default:
3150 return 0;
3151 }
3152 break;
bd60018a 3153 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3154 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3156 deemph_reg_value = 85;
3157 margin_reg_value = 78;
3158 break;
bd60018a 3159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3160 deemph_reg_value = 85;
3161 margin_reg_value = 116;
3162 break;
bd60018a 3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3164 deemph_reg_value = 85;
3165 margin_reg_value = 154;
3166 break;
3167 default:
3168 return 0;
3169 }
3170 break;
bd60018a 3171 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3172 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3174 deemph_reg_value = 64;
3175 margin_reg_value = 104;
3176 break;
bd60018a 3177 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3178 deemph_reg_value = 64;
3179 margin_reg_value = 154;
3180 break;
3181 default:
3182 return 0;
3183 }
3184 break;
bd60018a 3185 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3186 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3187 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3188 deemph_reg_value = 43;
3189 margin_reg_value = 154;
3190 break;
3191 default:
3192 return 0;
3193 }
3194 break;
3195 default:
3196 return 0;
3197 }
3198
a580516d 3199 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3200
3201 /* Clear calc init */
1966e59e
VS
3202 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3203 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3204 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3205 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3206 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3207
3208 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3209 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3210 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3211 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3212 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3213
a02ef3c7
VS
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3215 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3216 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3218
3219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3220 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3221 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3222 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3223
e4a1d846 3224 /* Program swing deemph */
f72df8db
VS
3225 for (i = 0; i < 4; i++) {
3226 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3227 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3228 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3229 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3230 }
e4a1d846
CML
3231
3232 /* Program swing margin */
f72df8db
VS
3233 for (i = 0; i < 4; i++) {
3234 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3235 val &= ~DPIO_SWING_MARGIN000_MASK;
3236 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3237 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3238 }
e4a1d846
CML
3239
3240 /* Disable unique transition scale */
f72df8db
VS
3241 for (i = 0; i < 4; i++) {
3242 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3243 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3244 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3245 }
e4a1d846
CML
3246
3247 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3248 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3249 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3250 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3251
3252 /*
3253 * The document says to set bit 27 for ch0 and bit 26
3254 * for ch1, which might be a typo in the doc.
3255 * For now, for this unique transition scale selection, set bit
3256 * 27 for ch0 and ch1.
3257 */
f72df8db
VS
3258 for (i = 0; i < 4; i++) {
3259 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3260 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3261 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3262 }
e4a1d846 3263
f72df8db
VS
3264 for (i = 0; i < 4; i++) {
3265 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3266 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3267 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3268 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3269 }
e4a1d846
CML
3270 }
3271
3272 /* Start swing calculation */
1966e59e
VS
3273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3274 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3276
3277 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3278 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3279 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3280
3281 /* LRC Bypass */
3282 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3283 val |= DPIO_LRC_BYPASS;
3284 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3285
a580516d 3286 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3287
3288 return 0;
3289}
3290
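The nested switches above encode a triangular table: each extra step of voltage swing removes one available pre-emphasis step, and every valid pair selects one (deemph, margin) register value. A minimal sketch of the same mapping as a lookup table; the struct and helper names here are illustrative and not part of i915:

/* Illustrative sketch only -- names below are not part of i915. */
struct chv_vswing_entry {
        u32 deemph;
        u32 margin;
};

/* Indexed as [voltage swing][pre-emphasis]; zeroed slots are undefined combos. */
static const struct chv_vswing_entry chv_vswing_table[4][4] = {
        { { 128,  52 }, {  85,  78 }, {  64, 104 }, {  43, 154 } },
        { { 128,  77 }, {  85, 116 }, {  64, 154 } },
        { { 128, 102 }, {  85, 154 } },
        { { 128, 154 } },
};

/* Returns false for swing/pre-emphasis combinations the table does not define. */
static bool chv_lookup_vswing(u8 swing, u8 preemph,
                              struct chv_vswing_entry *out)
{
        if (swing > 3 || preemph > 3 - swing)
                return false;

        *out = chv_vswing_table[swing][preemph];
        return true;
}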
a4fc5ed6 3291static void
0301b3ac
JN
3292intel_get_adjust_train(struct intel_dp *intel_dp,
3293 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3294{
3295 uint8_t v = 0;
3296 uint8_t p = 0;
3297 int lane;
1a2eb460
KP
3298 uint8_t voltage_max;
3299 uint8_t preemph_max;
a4fc5ed6 3300
33a34e4e 3301 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3302 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3303 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3304
3305 if (this_v > v)
3306 v = this_v;
3307 if (this_p > p)
3308 p = this_p;
3309 }
3310
1a2eb460 3311 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3312 if (v >= voltage_max)
3313 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3314
1a2eb460
KP
3315 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3316 if (p >= preemph_max)
3317 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3318
3319 for (lane = 0; lane < 4; lane++)
33a34e4e 3320 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3321}
3322
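Using the standard DPCD training-lane bit layout (swing in bits 1:0, MAX_SWING_REACHED in bit 2, pre-emphasis in bits 4:3), a sink request for swing level 3 on a source whose intel_dp_voltage_max() is level 2 ends up as 0x2 | DP_TRAIN_MAX_SWING_REACHED in every lane's train_set byte. A minimal sketch of that clamp in isolation, with a hypothetical helper name:

/* Illustrative sketch only -- mirrors the clamp above, including the case
 * where the request hits the source maximum exactly. */
static u8 clamp_requested_swing(u8 requested, u8 source_max)
{
        if (requested >= source_max)
                return source_max | DP_TRAIN_MAX_SWING_REACHED;

        return requested;
}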
3323static uint32_t
5829975c 3324gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3325{
3cf2efb1 3326 uint32_t signal_levels = 0;
a4fc5ed6 3327
3cf2efb1 3328 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3330 default:
3331 signal_levels |= DP_VOLTAGE_0_4;
3332 break;
bd60018a 3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3334 signal_levels |= DP_VOLTAGE_0_6;
3335 break;
bd60018a 3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3337 signal_levels |= DP_VOLTAGE_0_8;
3338 break;
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3340 signal_levels |= DP_VOLTAGE_1_2;
3341 break;
3342 }
3cf2efb1 3343 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3344 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3345 default:
3346 signal_levels |= DP_PRE_EMPHASIS_0;
3347 break;
bd60018a 3348 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3349 signal_levels |= DP_PRE_EMPHASIS_3_5;
3350 break;
bd60018a 3351 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3352 signal_levels |= DP_PRE_EMPHASIS_6;
3353 break;
bd60018a 3354 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3355 signal_levels |= DP_PRE_EMPHASIS_9_5;
3356 break;
3357 }
3358 return signal_levels;
3359}
3360
e3421a18
ZW
3361/* Gen6's DP voltage swing and pre-emphasis control */
3362static uint32_t
5829975c 3363gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3364{
3c5a62b5
YL
3365 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3366 DP_TRAIN_PRE_EMPHASIS_MASK);
3367 switch (signal_levels) {
bd60018a
SJ
3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3370 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3372 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3375 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3378 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3381 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3382 default:
3c5a62b5
YL
3383 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3384 " 0x%x\n", signal_levels);
3385 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3386 }
3387}
3388
1a2eb460
KP
3389/* Gen7's DP voltage swing and pre-emphasis control */
3390static uint32_t
5829975c 3391gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3392{
3393 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3394 DP_TRAIN_PRE_EMPHASIS_MASK);
3395 switch (signal_levels) {
bd60018a 3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3397 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3399 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3401 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3402
bd60018a 3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3404 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3406 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3407
bd60018a 3408 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3409 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3410 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3411 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3412
3413 default:
3414 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3415 " 0x%x\n", signal_levels);
3416 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3417 }
3418}
3419
d6c0d722
PZ
3420/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3421static uint32_t
5829975c 3422hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3423{
d6c0d722
PZ
3424 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3425 DP_TRAIN_PRE_EMPHASIS_MASK);
3426 switch (signal_levels) {
bd60018a 3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3428 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3430 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3432 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3434 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3435
bd60018a 3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3437 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3439 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3440 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3441 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3442
bd60018a 3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3444 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3446 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3447
3448 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3449 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3450 default:
3451 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3452 " 0x%x\n", signal_levels);
c5fe6a06 3453 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3454 }
a4fc5ed6
KP
3455}
3456
5829975c 3457static void bxt_signal_levels(struct intel_dp *intel_dp)
96fb9f9b
VK
3458{
3459 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3460 enum port port = dport->port;
3461 struct drm_device *dev = dport->base.base.dev;
3462 struct intel_encoder *encoder = &dport->base;
3463 uint8_t train_set = intel_dp->train_set[0];
3464 uint32_t level = 0;
3465
3466 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3467 DP_TRAIN_PRE_EMPHASIS_MASK);
3468 switch (signal_levels) {
3469 default:
3470 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3472 level = 0;
3473 break;
3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3475 level = 1;
3476 break;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3478 level = 2;
3479 break;
3480 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3481 level = 3;
3482 break;
3483 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3484 level = 4;
3485 break;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3487 level = 5;
3488 break;
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3490 level = 6;
3491 break;
3492 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3493 level = 7;
3494 break;
3495 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3496 level = 8;
3497 break;
3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3499 level = 9;
3500 break;
3501 }
3502
3503 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3504}
3505
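The ten valid swing/pre-emphasis combinations land on consecutive buffer-translation indices, so the switch above is equivalent to a small arithmetic rule: the level starts at 0, 4, 7 or 9 for swing levels 0-3 and goes up by one per pre-emphasis step. A minimal sketch of that rule with a hypothetical helper name:

/* Illustrative sketch only -- same level numbering as the switch above. */
static int bxt_vswing_index(u8 swing, u8 preemph)
{
        static const u8 base[4] = { 0, 4, 7, 9 };

        /* Combinations past the triangular table are not defined. */
        if (swing > 3 || preemph > 3 - swing)
                return -EINVAL;

        return base[swing] + preemph;
}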
f0a3424e
PZ
3506/* Properly updates "DP" with the correct signal levels. */
3507static void
3508intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509{
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3511 enum port port = intel_dig_port->port;
f0a3424e
PZ
3512 struct drm_device *dev = intel_dig_port->base.base.dev;
3513 uint32_t signal_levels, mask;
3514 uint8_t train_set = intel_dp->train_set[0];
3515
96fb9f9b
VK
3516 if (IS_BROXTON(dev)) {
3517 signal_levels = 0;
5829975c 3518 bxt_signal_levels(intel_dp);
96fb9f9b
VK
3519 mask = 0;
3520 } else if (HAS_DDI(dev)) {
5829975c 3521 signal_levels = hsw_signal_levels(train_set);
f0a3424e 3522 mask = DDI_BUF_EMP_MASK;
e4a1d846 3523 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3524 signal_levels = chv_signal_levels(intel_dp);
e4a1d846 3525 mask = 0;
e2fa6fba 3526 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3527 signal_levels = vlv_signal_levels(intel_dp);
e2fa6fba 3528 mask = 0;
bc7d38a4 3529 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3530 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3531 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3532 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3533 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3534 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3535 } else {
5829975c 3536 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3537 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3538 }
3539
96fb9f9b
VK
3540 if (mask)
3541 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3542
3543 DRM_DEBUG_KMS("Using vswing level %d\n",
3544 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3545 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3546 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3547 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3548
3549 *DP = (*DP & ~mask) | signal_levels;
3550}
3551
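Paths that program the levels through DPIO or a dedicated vswing sequence (VLV, CHV, BXT) pass mask == 0, so the caller's DP value is left untouched; the other paths encode the levels directly in the port register and rewrite only the masked field. A minimal sketch of the masked read-modify-write used on the final line above:

/* Illustrative sketch only: clear the field selected by mask, OR in new bits. */
static u32 update_signal_field(u32 reg, u32 mask, u32 levels)
{
        return (reg & ~mask) | levels;
}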
a4fc5ed6 3552static bool
ea5b213a 3553intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3554 uint32_t *DP,
58e10eb9 3555 uint8_t dp_train_pat)
a4fc5ed6 3556{
174edf1f
PZ
3557 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3558 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3559 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3560 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3561 int ret, len;
a4fc5ed6 3562
7b13b58a 3563 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3564
70aff66c 3565 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3566 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3567
2cdfe6c8
JN
3568 buf[0] = dp_train_pat;
3569 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3570 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3571 /* don't write DP_TRAINING_LANEx_SET on disable */
3572 len = 1;
3573 } else {
3574 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3575 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3576 len = intel_dp->lane_count + 1;
47ea7542 3577 }
a4fc5ed6 3578
9d1a1031
JN
3579 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3580 buf, len);
2cdfe6c8
JN
3581
3582 return ret == len;
a4fc5ed6
KP
3583}
3584
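Because DP_TRAINING_LANE0_SET..LANE3_SET (DPCD 0x103-0x106) sit directly after DP_TRAINING_PATTERN_SET (0x102), the pattern byte and the per-lane drive settings go out in a single AUX burst. An illustrative buffer (the name is not from the driver) for starting training pattern 1 with scrambling disabled on a four-lane link, all lanes at level 0:

/* Illustrative sketch only -- layout of the burst written above. */
uint8_t example_buf[5] = {
        DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE,    /* 0x21, lands at 0x102 */
        0x00, 0x00, 0x00, 0x00,         /* lane 0-3 swing/pre-emphasis, 0x103-0x106 */
};
/* len == lane_count + 1 == 5 for this burst */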
70aff66c
JN
3585static bool
3586intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3587 uint8_t dp_train_pat)
3588{
4e96c977
MK
3589 if (!intel_dp->train_set_valid)
3590 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3591 intel_dp_set_signal_levels(intel_dp, DP);
3592 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3593}
3594
3595static bool
3596intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3597 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3598{
3599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3600 struct drm_device *dev = intel_dig_port->base.base.dev;
3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602 int ret;
3603
3604 intel_get_adjust_train(intel_dp, link_status);
3605 intel_dp_set_signal_levels(intel_dp, DP);
3606
3607 I915_WRITE(intel_dp->output_reg, *DP);
3608 POSTING_READ(intel_dp->output_reg);
3609
9d1a1031
JN
3610 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3611 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3612
3613 return ret == intel_dp->lane_count;
3614}
3615
3ab9c637
ID
3616static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3617{
3618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3619 struct drm_device *dev = intel_dig_port->base.base.dev;
3620 struct drm_i915_private *dev_priv = dev->dev_private;
3621 enum port port = intel_dig_port->port;
3622 uint32_t val;
3623
3624 if (!HAS_DDI(dev))
3625 return;
3626
3627 val = I915_READ(DP_TP_CTL(port));
3628 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3629 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3630 I915_WRITE(DP_TP_CTL(port), val);
3631
3632 /*
3633 * On PORT_A we can have only eDP in SST mode. There, the only reason
3634 * we need to set idle transmission mode is to work around a HW issue
3635 * where we enable the pipe while not in idle link-training mode.
3636 * In this case there is a requirement to wait for a minimum number of
3637 * idle patterns to be sent.
3638 */
3639 if (port == PORT_A)
3640 return;
3641
3642 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3643 1))
3644 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3645}
3646
33a34e4e 3647/* Enable corresponding port and start training pattern 1 */
c19b0669 3648void
33a34e4e 3649intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3650{
da63a9f2 3651 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3652 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3653 int i;
3654 uint8_t voltage;
cdb0e95b 3655 int voltage_tries, loop_tries;
ea5b213a 3656 uint32_t DP = intel_dp->DP;
6aba5b6c 3657 uint8_t link_config[2];
a4fc5ed6 3658
affa9354 3659 if (HAS_DDI(dev))
c19b0669
PZ
3660 intel_ddi_prepare_link_retrain(encoder);
3661
3cf2efb1 3662 /* Write the link configuration data */
6aba5b6c
JN
3663 link_config[0] = intel_dp->link_bw;
3664 link_config[1] = intel_dp->lane_count;
3665 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3666 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3667 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3668 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3670 &intel_dp->rate_select, 1);
6aba5b6c
JN
3671
3672 link_config[0] = 0;
3673 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3674 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3675
3676 DP |= DP_PORT_EN;
1a2eb460 3677
70aff66c
JN
3678 /* clock recovery */
3679 if (!intel_dp_reset_link_train(intel_dp, &DP,
3680 DP_TRAINING_PATTERN_1 |
3681 DP_LINK_SCRAMBLING_DISABLE)) {
3682 DRM_ERROR("failed to enable link training\n");
3683 return;
3684 }
3685
a4fc5ed6 3686 voltage = 0xff;
cdb0e95b
KP
3687 voltage_tries = 0;
3688 loop_tries = 0;
a4fc5ed6 3689 for (;;) {
70aff66c 3690 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3691
a7c9655f 3692 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3693 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3694 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3695 break;
93f62dad 3696 }
a4fc5ed6 3697
01916270 3698 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3699 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3700 break;
3701 }
3702
4e96c977
MK
3703 /*
3704 * if we used previously trained voltage and pre-emphasis values
3705 * and we don't get clock recovery, reset link training values
3706 */
3707 if (intel_dp->train_set_valid) {
3708 DRM_DEBUG_KMS("clock recovery not ok, reset");
3709 /* clear the flag as we are not reusing train set */
3710 intel_dp->train_set_valid = false;
3711 if (!intel_dp_reset_link_train(intel_dp, &DP,
3712 DP_TRAINING_PATTERN_1 |
3713 DP_LINK_SCRAMBLING_DISABLE)) {
3714 DRM_ERROR("failed to enable link training\n");
3715 return;
3716 }
3717 continue;
3718 }
3719
3cf2efb1
CW
3720 /* Check to see if we've tried the max voltage */
3721 for (i = 0; i < intel_dp->lane_count; i++)
3722 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3723 break;
3b4f819d 3724 if (i == intel_dp->lane_count) {
b06fbda3
DV
3725 ++loop_tries;
3726 if (loop_tries == 5) {
3def84b3 3727 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3728 break;
3729 }
70aff66c
JN
3730 intel_dp_reset_link_train(intel_dp, &DP,
3731 DP_TRAINING_PATTERN_1 |
3732 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3733 voltage_tries = 0;
3734 continue;
3735 }
a4fc5ed6 3736
3cf2efb1 3737 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3738 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3739 ++voltage_tries;
b06fbda3 3740 if (voltage_tries == 5) {
3def84b3 3741 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3742 break;
3743 }
3744 } else
3745 voltage_tries = 0;
3746 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3747
70aff66c
JN
3748 /* Update training set as requested by target */
3749 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3750 DRM_ERROR("failed to update link training\n");
3751 break;
3752 }
a4fc5ed6
KP
3753 }
3754
33a34e4e
JB
3755 intel_dp->DP = DP;
3756}
3757
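Stripped of the register writes, the clock-recovery loop above is a small retry policy: give up after five consecutive requests at the same voltage, and allow five full resets once every lane reports maximum swing. A schematic sketch of just that bookkeeping (hypothetical names, cached-train_set handling omitted); it is not a drop-in replacement for the loop:

/* Illustrative sketch only -- retry bookkeeping mirroring the loop above. */
enum cr_action { CR_DONE, CR_RETRY, CR_GIVE_UP };

static enum cr_action cr_next_action(bool cr_ok, bool all_lanes_max_swing,
                                     bool same_voltage_as_last,
                                     int *loop_tries, int *voltage_tries)
{
        if (cr_ok)
                return CR_DONE;

        if (all_lanes_max_swing) {
                if (++(*loop_tries) == 5)
                        return CR_GIVE_UP;
                *voltage_tries = 0;
                return CR_RETRY;        /* reset to pattern 1 and start over */
        }

        if (same_voltage_as_last) {
                if (++(*voltage_tries) == 5)
                        return CR_GIVE_UP;
        } else {
                *voltage_tries = 0;
        }

        return CR_RETRY;                /* apply the sink's adjust request */
}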
c19b0669 3758void
33a34e4e
JB
3759intel_dp_complete_link_train(struct intel_dp *intel_dp)
3760{
33a34e4e 3761 bool channel_eq = false;
37f80975 3762 int tries, cr_tries;
33a34e4e 3763 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3764 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3765
3766 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3767 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3768 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3769
a4fc5ed6 3770 /* channel equalization */
70aff66c 3771 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3772 training_pattern |
70aff66c
JN
3773 DP_LINK_SCRAMBLING_DISABLE)) {
3774 DRM_ERROR("failed to start channel equalization\n");
3775 return;
3776 }
3777
a4fc5ed6 3778 tries = 0;
37f80975 3779 cr_tries = 0;
a4fc5ed6
KP
3780 channel_eq = false;
3781 for (;;) {
70aff66c 3782 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3783
37f80975
JB
3784 if (cr_tries > 5) {
3785 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3786 break;
3787 }
3788
a7c9655f 3789 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3790 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3791 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3792 break;
70aff66c 3793 }
a4fc5ed6 3794
37f80975 3795 /* Make sure clock is still ok */
01916270 3796 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3797 intel_dp->train_set_valid = false;
37f80975 3798 intel_dp_start_link_train(intel_dp);
70aff66c 3799 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3800 training_pattern |
70aff66c 3801 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3802 cr_tries++;
3803 continue;
3804 }
3805
1ffdff13 3806 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3807 channel_eq = true;
3808 break;
3809 }
a4fc5ed6 3810
37f80975
JB
3811 /* Try 5 times, then try clock recovery if that fails */
3812 if (tries > 5) {
4e96c977 3813 intel_dp->train_set_valid = false;
37f80975 3814 intel_dp_start_link_train(intel_dp);
70aff66c 3815 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3816 training_pattern |
70aff66c 3817 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3818 tries = 0;
3819 cr_tries++;
3820 continue;
3821 }
a4fc5ed6 3822
70aff66c
JN
3823 /* Update training set as requested by target */
3824 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3825 DRM_ERROR("failed to update link training\n");
3826 break;
3827 }
3cf2efb1 3828 ++tries;
869184a6 3829 }
3cf2efb1 3830
3ab9c637
ID
3831 intel_dp_set_idle_link_train(intel_dp);
3832
3833 intel_dp->DP = DP;
3834
4e96c977 3835 if (channel_eq) {
5fa836a9 3836 intel_dp->train_set_valid = true;
07f42258 3837 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3838 }
3ab9c637
ID
3839}
3840
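The pattern chosen at the top of this function follows a simple rule: HBR2 (DP_LINK_BW_5_4) always requires training pattern 3, and otherwise pattern 3 is used only when both source and sink support TPS3 (intel_dp->use_tps3). A minimal sketch of that selection with a hypothetical helper name:

/* Illustrative sketch only -- mirrors the pattern selection above. */
static u32 pick_eq_training_pattern(u8 link_bw, bool use_tps3)
{
        if (link_bw == DP_LINK_BW_5_4 || use_tps3)
                return DP_TRAINING_PATTERN_3;

        return DP_TRAINING_PATTERN_2;
}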
3841void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3842{
70aff66c 3843 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3844 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3845}
3846
3847static void
ea5b213a 3848intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3849{
da63a9f2 3850 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3851 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3852 enum port port = intel_dig_port->port;
da63a9f2 3853 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3854 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3855 uint32_t DP = intel_dp->DP;
a4fc5ed6 3856
bc76e320 3857 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3858 return;
3859
0c33d8d7 3860 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3861 return;
3862
28c97730 3863 DRM_DEBUG_KMS("\n");
32f9d658 3864
39e5fa88
VS
3865 if ((IS_GEN7(dev) && port == PORT_A) ||
3866 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3867 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3868 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3869 } else {
aad3d14d
VS
3870 if (IS_CHERRYVIEW(dev))
3871 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3872 else
3873 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3874 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3875 }
1612c8bd 3876 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3877 POSTING_READ(intel_dp->output_reg);
5eb08b69 3878
1612c8bd
VS
3879 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3880 I915_WRITE(intel_dp->output_reg, DP);
3881 POSTING_READ(intel_dp->output_reg);
3882
3883 /*
3884 * HW workaround for IBX, we need to move the port
3885 * to transcoder A after disabling it to allow the
3886 * matching HDMI port to be enabled on transcoder A.
3887 */
3888 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3889 /* always enable with pattern 1 (as per spec) */
3890 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3891 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3892 I915_WRITE(intel_dp->output_reg, DP);
3893 POSTING_READ(intel_dp->output_reg);
3894
3895 DP &= ~DP_PORT_EN;
5bddd17f 3896 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3897 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3898 }
3899
f01eca2e 3900 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3901}
3902
26d61aad
KP
3903static bool
3904intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3905{
a031d709
RV
3906 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3907 struct drm_device *dev = dig_port->base.base.dev;
3908 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3909 uint8_t rev;
a031d709 3910
9d1a1031
JN
3911 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3912 sizeof(intel_dp->dpcd)) < 0)
edb39244 3913 return false; /* aux transfer failed */
92fd8fd1 3914
a8e98153 3915 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3916
edb39244
AJ
3917 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3918 return false; /* DPCD not present */
3919
2293bb5c
SK
3920 /* Check if the panel supports PSR */
3921 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3922 if (is_edp(intel_dp)) {
9d1a1031
JN
3923 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3924 intel_dp->psr_dpcd,
3925 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3926 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3927 dev_priv->psr.sink_support = true;
50003939 3928 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3929 }
474d1ec4
SJ
3930
3931 if (INTEL_INFO(dev)->gen >= 9 &&
3932 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3933 uint8_t frame_sync_cap;
3934
3935 dev_priv->psr.sink_support = true;
3936 intel_dp_dpcd_read_wake(&intel_dp->aux,
3937 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3938 &frame_sync_cap, 1);
3939 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3940 /* PSR2 needs frame sync as well */
3941 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3942 DRM_DEBUG_KMS("PSR2 %s on sink",
3943 dev_priv->psr.psr2_support ? "supported" : "not supported");
3944 }
50003939
JN
3945 }
3946
7809a611 3947 /* Training Pattern 3 support, both source and sink */
06ea66b6 3948 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3949 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3950 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3951 intel_dp->use_tps3 = true;
f8d8a672 3952 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3953 } else
3954 intel_dp->use_tps3 = false;
3955
fc0f8e25
SJ
3956 /* Intermediate frequency support */
3957 if (is_edp(intel_dp) &&
3958 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3959 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3960 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3961 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3962 int i;
3963
fc0f8e25
SJ
3964 intel_dp_dpcd_read_wake(&intel_dp->aux,
3965 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3966 sink_rates,
3967 sizeof(sink_rates));
ea2d8a42 3968
94ca719e
VS
3969 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3970 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3971
3972 if (val == 0)
3973 break;
3974
af77b974
SJ
3975 /* Value read is in units of 200 kHz, while the drm clock is saved in deca-kHz */
3976 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3977 }
94ca719e 3978 intel_dp->num_sink_rates = i;
fc0f8e25 3979 }
0336400e
VS
3980
3981 intel_dp_print_rates(intel_dp);
3982
edb39244
AJ
3983 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3984 DP_DWN_STRM_PORT_PRESENT))
3985 return true; /* native DP sink */
3986
3987 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3988 return true; /* no per-port downstream info */
3989
9d1a1031
JN
3990 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3991 intel_dp->downstream_ports,
3992 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3993 return false; /* downstream port status fetch failed */
3994
3995 return true;
92fd8fd1
KP
3996}
3997
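The entries in DP_SUPPORTED_LINK_RATES are 16-bit little-endian values in units of 200 kHz, while the driver keeps link rates in 10 kHz units, hence the (val * 200) / 10 above. For example, the eDP 1.4 intermediate-rate entry 10800 (0x2A30) works out to 10800 * 200 kHz = 2.16 GHz and is stored as 216000:

/* Illustrative sketch only -- conversion of one table entry. */
__le16 raw_entry = cpu_to_le16(10800);                  /* 2.16 GHz link rate */
int rate_10khz = (le16_to_cpu(raw_entry) * 200) / 10;   /* == 216000 */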
0d198328
AJ
3998static void
3999intel_dp_probe_oui(struct intel_dp *intel_dp)
4000{
4001 u8 buf[3];
4002
4003 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4004 return;
4005
9d1a1031 4006 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4007 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4008 buf[0], buf[1], buf[2]);
4009
9d1a1031 4010 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4011 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4012 buf[0], buf[1], buf[2]);
4013}
4014
0e32b39c
DA
4015static bool
4016intel_dp_probe_mst(struct intel_dp *intel_dp)
4017{
4018 u8 buf[1];
4019
4020 if (!intel_dp->can_mst)
4021 return false;
4022
4023 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4024 return false;
4025
0e32b39c
DA
4026 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4027 if (buf[0] & DP_MST_CAP) {
4028 DRM_DEBUG_KMS("Sink is MST capable\n");
4029 intel_dp->is_mst = true;
4030 } else {
4031 DRM_DEBUG_KMS("Sink is not MST capable\n");
4032 intel_dp->is_mst = false;
4033 }
4034 }
0e32b39c
DA
4035
4036 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4037 return intel_dp->is_mst;
4038}
4039
d2e216d0
RV
4040int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4041{
4042 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4043 struct drm_device *dev = intel_dig_port->base.base.dev;
4044 struct intel_crtc *intel_crtc =
4045 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
4046 u8 buf;
4047 int test_crc_count;
4048 int attempts = 6;
4373f0f2 4049 int ret = 0;
d2e216d0 4050
4373f0f2 4051 hsw_disable_ips(intel_crtc);
d2e216d0 4052
4373f0f2
PZ
4053 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4054 ret = -EIO;
4055 goto out;
4056 }
4057
4058 if (!(buf & DP_TEST_CRC_SUPPORTED)) {
4059 ret = -ENOTTY;
4060 goto out;
4061 }
d2e216d0 4062
4373f0f2
PZ
4063 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4064 ret = -EIO;
4065 goto out;
4066 }
1dda5f93 4067
9d1a1031 4068 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4069 buf | DP_TEST_SINK_START) < 0) {
4070 ret = -EIO;
4071 goto out;
4072 }
4073
4074 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4075 ret = -EIO;
4076 goto out;
4077 }
d2e216d0 4078
ad9dc91b 4079 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4080
ad9dc91b 4081 do {
1dda5f93 4082 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4083 DP_TEST_SINK_MISC, &buf) < 0) {
4084 ret = -EIO;
4085 goto out;
4086 }
ad9dc91b
RV
4087 intel_wait_for_vblank(dev, intel_crtc->pipe);
4088 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4089
4090 if (attempts == 0) {
90bd1f46 4091 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4373f0f2
PZ
4092 ret = -ETIMEDOUT;
4093 goto out;
ad9dc91b 4094 }
d2e216d0 4095
4373f0f2
PZ
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4097 ret = -EIO;
4098 goto out;
4099 }
d2e216d0 4100
4373f0f2
PZ
4101 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4102 ret = -EIO;
4103 goto out;
4104 }
1dda5f93 4105 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4373f0f2
PZ
4106 buf & ~DP_TEST_SINK_START) < 0) {
4107 ret = -EIO;
4108 goto out;
4109 }
4110out:
4111 hsw_enable_ips(intel_crtc);
4112 return ret;
d2e216d0
RV
4113}
4114
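The six bytes fetched from DP_TEST_CRC_R_CR above are three 16-bit CRCs (R/Cr, G/Y, B/Cb), each stored low byte first in the DPCD. A minimal sketch of how a caller might unpack them; the variable names are illustrative:

/* Illustrative sketch only -- unpacking the 6-byte CRC buffer read above. */
u16 crc_r_cr = crc[0] | (crc[1] << 8);
u16 crc_g_y  = crc[2] | (crc[3] << 8);
u16 crc_b_cb = crc[4] | (crc[5] << 8);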
a60f0e38
JB
4115static bool
4116intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4117{
9d1a1031
JN
4118 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4119 DP_DEVICE_SERVICE_IRQ_VECTOR,
4120 sink_irq_vector, 1) == 1;
a60f0e38
JB
4121}
4122
0e32b39c
DA
4123static bool
4124intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4125{
4126 int ret;
4127
4128 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4129 DP_SINK_COUNT_ESI,
4130 sink_irq_vector, 14);
4131 if (ret != 14)
4132 return false;
4133
4134 return true;
4135}
4136
c5d5ab7a
TP
4137static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4138{
4139 uint8_t test_result = DP_TEST_ACK;
4140 return test_result;
4141}
4142
4143static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4144{
4145 uint8_t test_result = DP_TEST_NAK;
4146 return test_result;
4147}
4148
4149static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4150{
c5d5ab7a 4151 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4152 struct intel_connector *intel_connector = intel_dp->attached_connector;
4153 struct drm_connector *connector = &intel_connector->base;
4154
4155 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4156 connector->edid_corrupt ||
559be30c
TP
4157 intel_dp->aux.i2c_defer_count > 6) {
4158 /* Check EDID read for NACKs, DEFERs and corruption
4159 * (DP CTS 1.2 Core r1.1)
4160 * 4.2.2.4 : Failed EDID read, I2C_NAK
4161 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4162 * 4.2.2.6 : EDID corruption detected
4163 * Use failsafe mode for all cases
4164 */
4165 if (intel_dp->aux.i2c_nack_count > 0 ||
4166 intel_dp->aux.i2c_defer_count > 0)
4167 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4168 intel_dp->aux.i2c_nack_count,
4169 intel_dp->aux.i2c_defer_count);
4170 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4171 } else {
4172 if (!drm_dp_dpcd_write(&intel_dp->aux,
4173 DP_TEST_EDID_CHECKSUM,
4174 &intel_connector->detect_edid->checksum,
5a1cc655 4175 1))
559be30c
TP
4176 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4177
4178 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4179 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4180 }
4181
4182 /* Set test active flag here so userspace doesn't interrupt things */
4183 intel_dp->compliance_test_active = 1;
4184
c5d5ab7a
TP
4185 return test_result;
4186}
4187
4188static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4189{
c5d5ab7a
TP
4190 uint8_t test_result = DP_TEST_NAK;
4191 return test_result;
4192}
4193
4194static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4195{
4196 uint8_t response = DP_TEST_NAK;
4197 uint8_t rxdata = 0;
4198 int status = 0;
4199
559be30c 4200 intel_dp->compliance_test_active = 0;
c5d5ab7a 4201 intel_dp->compliance_test_type = 0;
559be30c
TP
4202 intel_dp->compliance_test_data = 0;
4203
c5d5ab7a
TP
4204 intel_dp->aux.i2c_nack_count = 0;
4205 intel_dp->aux.i2c_defer_count = 0;
4206
4207 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4208 if (status <= 0) {
4209 DRM_DEBUG_KMS("Could not read test request from sink\n");
4210 goto update_status;
4211 }
4212
4213 switch (rxdata) {
4214 case DP_TEST_LINK_TRAINING:
4215 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4216 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4217 response = intel_dp_autotest_link_training(intel_dp);
4218 break;
4219 case DP_TEST_LINK_VIDEO_PATTERN:
4220 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4221 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4222 response = intel_dp_autotest_video_pattern(intel_dp);
4223 break;
4224 case DP_TEST_LINK_EDID_READ:
4225 DRM_DEBUG_KMS("EDID test requested\n");
4226 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4227 response = intel_dp_autotest_edid(intel_dp);
4228 break;
4229 case DP_TEST_LINK_PHY_TEST_PATTERN:
4230 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4231 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4232 response = intel_dp_autotest_phy_pattern(intel_dp);
4233 break;
4234 default:
4235 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4236 break;
4237 }
4238
4239update_status:
4240 status = drm_dp_dpcd_write(&intel_dp->aux,
4241 DP_TEST_RESPONSE,
4242 &response, 1);
4243 if (status <= 0)
4244 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4245}
4246
0e32b39c
DA
4247static int
4248intel_dp_check_mst_status(struct intel_dp *intel_dp)
4249{
4250 bool bret;
4251
4252 if (intel_dp->is_mst) {
4253 u8 esi[16] = { 0 };
4254 int ret = 0;
4255 int retry;
4256 bool handled;
4257 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4258go_again:
4259 if (bret == true) {
4260
4261 /* check link status - the link status bytes start at esi[10] (DPCD 0x200c) */
4262 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4263 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4264 intel_dp_start_link_train(intel_dp);
4265 intel_dp_complete_link_train(intel_dp);
4266 intel_dp_stop_link_train(intel_dp);
4267 }
4268
6f34cc39 4269 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4270 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4271
4272 if (handled) {
4273 for (retry = 0; retry < 3; retry++) {
4274 int wret;
4275 wret = drm_dp_dpcd_write(&intel_dp->aux,
4276 DP_SINK_COUNT_ESI+1,
4277 &esi[1], 3);
4278 if (wret == 3) {
4279 break;
4280 }
4281 }
4282
4283 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4284 if (bret == true) {
6f34cc39 4285 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4286 goto go_again;
4287 }
4288 } else
4289 ret = 0;
4290
4291 return ret;
4292 } else {
4293 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4294 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4295 intel_dp->is_mst = false;
4296 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4297 /* send a hotplug event */
4298 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4299 }
4300 }
4301 return -EINVAL;
4302}
4303
a4fc5ed6
KP
4304/*
4305 * According to DP spec
4306 * 5.1.2:
4307 * 1. Read DPCD
4308 * 2. Configure link according to Receiver Capabilities
4309 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4310 * 4. Check link status on receipt of hot-plug interrupt
4311 */
a5146200 4312static void
ea5b213a 4313intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4314{
5b215bcf 4315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4316 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4317 u8 sink_irq_vector;
93f62dad 4318 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4319
5b215bcf
DA
4320 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4321
da63a9f2 4322 if (!intel_encoder->connectors_active)
d2b996ac 4323 return;
59cd09e1 4324
da63a9f2 4325 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4326 return;
4327
1a125d8a
ID
4328 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4329 return;
4330
92fd8fd1 4331 /* Try to read receiver status if the link appears to be up */
93f62dad 4332 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4333 return;
4334 }
4335
92fd8fd1 4336 /* Now read the DPCD to see if it's actually running */
26d61aad 4337 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4338 return;
4339 }
4340
a60f0e38
JB
4341 /* Try to read the source of the interrupt */
4342 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4343 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4344 /* Clear interrupt source */
9d1a1031
JN
4345 drm_dp_dpcd_writeb(&intel_dp->aux,
4346 DP_DEVICE_SERVICE_IRQ_VECTOR,
4347 sink_irq_vector);
a60f0e38
JB
4348
4349 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4350 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4351 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4352 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4353 }
4354
1ffdff13 4355 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4356 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4357 intel_encoder->base.name);
33a34e4e
JB
4358 intel_dp_start_link_train(intel_dp);
4359 intel_dp_complete_link_train(intel_dp);
3ab9c637 4360 intel_dp_stop_link_train(intel_dp);
33a34e4e 4361 }
a4fc5ed6 4362}
a4fc5ed6 4363
caf9ab24 4364/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4365static enum drm_connector_status
26d61aad 4366intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4367{
caf9ab24 4368 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4369 uint8_t type;
4370
4371 if (!intel_dp_get_dpcd(intel_dp))
4372 return connector_status_disconnected;
4373
4374 /* if there's no downstream port, we're done */
4375 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4376 return connector_status_connected;
caf9ab24
AJ
4377
4378 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4379 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4380 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4381 uint8_t reg;
9d1a1031
JN
4382
4383 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4384 &reg, 1) < 0)
caf9ab24 4385 return connector_status_unknown;
9d1a1031 4386
23235177
AJ
4387 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4388 : connector_status_disconnected;
caf9ab24
AJ
4389 }
4390
4391 /* If no HPD, poke DDC gently */
0b99836f 4392 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4393 return connector_status_connected;
caf9ab24
AJ
4394
4395 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4396 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4397 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4398 if (type == DP_DS_PORT_TYPE_VGA ||
4399 type == DP_DS_PORT_TYPE_NON_EDID)
4400 return connector_status_unknown;
4401 } else {
4402 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4403 DP_DWN_STRM_PORT_TYPE_MASK;
4404 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4405 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4406 return connector_status_unknown;
4407 }
caf9ab24
AJ
4408
4409 /* Anything else is out of spec, warn and ignore */
4410 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4411 return connector_status_disconnected;
71ba9000
AJ
4412}
4413
d410b56d
CW
4414static enum drm_connector_status
4415edp_detect(struct intel_dp *intel_dp)
4416{
4417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418 enum drm_connector_status status;
4419
4420 status = intel_panel_detect(dev);
4421 if (status == connector_status_unknown)
4422 status = connector_status_connected;
4423
4424 return status;
4425}
4426
5eb08b69 4427static enum drm_connector_status
a9756bb5 4428ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4429{
30add22d 4430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4431 struct drm_i915_private *dev_priv = dev->dev_private;
4432 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4433
1b469639
DL
4434 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4435 return connector_status_disconnected;
4436
26d61aad 4437 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4438}
4439
2a592bec
DA
4440static int g4x_digital_port_connected(struct drm_device *dev,
4441 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4442{
a4fc5ed6 4443 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4444 uint32_t bit;
5eb08b69 4445
232a6ee9
TP
4446 if (IS_VALLEYVIEW(dev)) {
4447 switch (intel_dig_port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4456 break;
4457 default:
2a592bec 4458 return -EINVAL;
232a6ee9
TP
4459 }
4460 } else {
4461 switch (intel_dig_port->port) {
4462 case PORT_B:
4463 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4464 break;
4465 case PORT_C:
4466 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4467 break;
4468 case PORT_D:
4469 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4470 break;
4471 default:
2a592bec 4472 return -EINVAL;
232a6ee9 4473 }
a4fc5ed6
KP
4474 }
4475
10f76a38 4476 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4477 return 0;
4478 return 1;
4479}
4480
4481static enum drm_connector_status
4482g4x_dp_detect(struct intel_dp *intel_dp)
4483{
4484 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4485 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4486 int ret;
4487
4488 /* Can't disconnect eDP, but you can close the lid... */
4489 if (is_edp(intel_dp)) {
4490 enum drm_connector_status status;
4491
4492 status = intel_panel_detect(dev);
4493 if (status == connector_status_unknown)
4494 status = connector_status_connected;
4495 return status;
4496 }
4497
4498 ret = g4x_digital_port_connected(dev, intel_dig_port);
4499 if (ret == -EINVAL)
4500 return connector_status_unknown;
4501 else if (ret == 0)
a4fc5ed6
KP
4502 return connector_status_disconnected;
4503
26d61aad 4504 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4505}
4506
8c241fef 4507static struct edid *
beb60608 4508intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4509{
beb60608 4510 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4511
9cd300e0
JN
4512 /* use cached edid if we have one */
4513 if (intel_connector->edid) {
9cd300e0
JN
4514 /* invalid edid */
4515 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4516 return NULL;
4517
55e9edeb 4518 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4519 } else
4520 return drm_get_edid(&intel_connector->base,
4521 &intel_dp->aux.ddc);
4522}
8c241fef 4523
beb60608
CW
4524static void
4525intel_dp_set_edid(struct intel_dp *intel_dp)
4526{
4527 struct intel_connector *intel_connector = intel_dp->attached_connector;
4528 struct edid *edid;
8c241fef 4529
beb60608
CW
4530 edid = intel_dp_get_edid(intel_dp);
4531 intel_connector->detect_edid = edid;
4532
4533 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4534 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4535 else
4536 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4537}
4538
beb60608
CW
4539static void
4540intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4541{
beb60608 4542 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4543
beb60608
CW
4544 kfree(intel_connector->detect_edid);
4545 intel_connector->detect_edid = NULL;
9cd300e0 4546
beb60608
CW
4547 intel_dp->has_audio = false;
4548}
d6f24d0f 4549
beb60608
CW
4550static enum intel_display_power_domain
4551intel_dp_power_get(struct intel_dp *dp)
4552{
4553 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4554 enum intel_display_power_domain power_domain;
4555
4556 power_domain = intel_display_port_power_domain(encoder);
4557 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4558
4559 return power_domain;
4560}
d6f24d0f 4561
beb60608
CW
4562static void
4563intel_dp_power_put(struct intel_dp *dp,
4564 enum intel_display_power_domain power_domain)
4565{
4566 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4567 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4568}
4569
a9756bb5
ZW
4570static enum drm_connector_status
4571intel_dp_detect(struct drm_connector *connector, bool force)
4572{
4573 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4574 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4575 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4576 struct drm_device *dev = connector->dev;
a9756bb5 4577 enum drm_connector_status status;
671dedd2 4578 enum intel_display_power_domain power_domain;
0e32b39c 4579 bool ret;
09b1eb13 4580 u8 sink_irq_vector;
a9756bb5 4581
164c8598 4582 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4583 connector->base.id, connector->name);
beb60608 4584 intel_dp_unset_edid(intel_dp);
164c8598 4585
0e32b39c
DA
4586 if (intel_dp->is_mst) {
4587 /* MST devices are disconnected from a monitor POV */
4588 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4589 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4590 return connector_status_disconnected;
0e32b39c
DA
4591 }
4592
beb60608 4593 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4594
d410b56d
CW
4595 /* Can't disconnect eDP, but you can close the lid... */
4596 if (is_edp(intel_dp))
4597 status = edp_detect(intel_dp);
4598 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4599 status = ironlake_dp_detect(intel_dp);
4600 else
4601 status = g4x_dp_detect(intel_dp);
4602 if (status != connector_status_connected)
c8c8fb33 4603 goto out;
a9756bb5 4604
0d198328
AJ
4605 intel_dp_probe_oui(intel_dp);
4606
0e32b39c
DA
4607 ret = intel_dp_probe_mst(intel_dp);
4608 if (ret) {
4609 /* if we are in MST mode then this connector
4610 * won't appear connected or have anything with EDID on it */
4611 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4612 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4613 status = connector_status_disconnected;
4614 goto out;
4615 }
4616
beb60608 4617 intel_dp_set_edid(intel_dp);
a9756bb5 4618
d63885da
PZ
4619 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4620 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4621 status = connector_status_connected;
4622
09b1eb13
TP
4623 /* Try to read the source of the interrupt */
4624 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4625 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4626 /* Clear interrupt source */
4627 drm_dp_dpcd_writeb(&intel_dp->aux,
4628 DP_DEVICE_SERVICE_IRQ_VECTOR,
4629 sink_irq_vector);
4630
4631 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4632 intel_dp_handle_test_request(intel_dp);
4633 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4634 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4635 }
4636
c8c8fb33 4637out:
beb60608 4638 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4639 return status;
a4fc5ed6
KP
4640}
4641
beb60608
CW
4642static void
4643intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4644{
df0e9248 4645 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4646 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4647 enum intel_display_power_domain power_domain;
a4fc5ed6 4648
beb60608
CW
4649 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4650 connector->base.id, connector->name);
4651 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4652
beb60608
CW
4653 if (connector->status != connector_status_connected)
4654 return;
671dedd2 4655
beb60608
CW
4656 power_domain = intel_dp_power_get(intel_dp);
4657
4658 intel_dp_set_edid(intel_dp);
4659
4660 intel_dp_power_put(intel_dp, power_domain);
4661
4662 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4663 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4664}
4665
4666static int intel_dp_get_modes(struct drm_connector *connector)
4667{
4668 struct intel_connector *intel_connector = to_intel_connector(connector);
4669 struct edid *edid;
4670
4671 edid = intel_connector->detect_edid;
4672 if (edid) {
4673 int ret = intel_connector_update_modes(connector, edid);
4674 if (ret)
4675 return ret;
4676 }
32f9d658 4677
f8779fda 4678 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4679 if (is_edp(intel_attached_dp(connector)) &&
4680 intel_connector->panel.fixed_mode) {
f8779fda 4681 struct drm_display_mode *mode;
beb60608
CW
4682
4683 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4684 intel_connector->panel.fixed_mode);
f8779fda 4685 if (mode) {
32f9d658
ZW
4686 drm_mode_probed_add(connector, mode);
4687 return 1;
4688 }
4689 }
beb60608 4690
32f9d658 4691 return 0;
a4fc5ed6
KP
4692}
4693
1aad7ac0
CW
4694static bool
4695intel_dp_detect_audio(struct drm_connector *connector)
4696{
1aad7ac0 4697 bool has_audio = false;
beb60608 4698 struct edid *edid;
1aad7ac0 4699
beb60608
CW
4700 edid = to_intel_connector(connector)->detect_edid;
4701 if (edid)
1aad7ac0 4702 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4703
1aad7ac0
CW
4704 return has_audio;
4705}
4706
f684960e
CW
4707static int
4708intel_dp_set_property(struct drm_connector *connector,
4709 struct drm_property *property,
4710 uint64_t val)
4711{
e953fd7b 4712 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4713 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4714 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4715 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4716 int ret;
4717
662595df 4718 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4719 if (ret)
4720 return ret;
4721
3f43c48d 4722 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4723 int i = val;
4724 bool has_audio;
4725
4726 if (i == intel_dp->force_audio)
f684960e
CW
4727 return 0;
4728
1aad7ac0 4729 intel_dp->force_audio = i;
f684960e 4730
c3e5f67b 4731 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4732 has_audio = intel_dp_detect_audio(connector);
4733 else
c3e5f67b 4734 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4735
4736 if (has_audio == intel_dp->has_audio)
f684960e
CW
4737 return 0;
4738
1aad7ac0 4739 intel_dp->has_audio = has_audio;
f684960e
CW
4740 goto done;
4741 }
4742
e953fd7b 4743 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4744 bool old_auto = intel_dp->color_range_auto;
4745 uint32_t old_range = intel_dp->color_range;
4746
55bc60db
VS
4747 switch (val) {
4748 case INTEL_BROADCAST_RGB_AUTO:
4749 intel_dp->color_range_auto = true;
4750 break;
4751 case INTEL_BROADCAST_RGB_FULL:
4752 intel_dp->color_range_auto = false;
4753 intel_dp->color_range = 0;
4754 break;
4755 case INTEL_BROADCAST_RGB_LIMITED:
4756 intel_dp->color_range_auto = false;
4757 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4758 break;
4759 default:
4760 return -EINVAL;
4761 }
ae4edb80
DV
4762
4763 if (old_auto == intel_dp->color_range_auto &&
4764 old_range == intel_dp->color_range)
4765 return 0;
4766
e953fd7b
CW
4767 goto done;
4768 }
4769
53b41837
YN
4770 if (is_edp(intel_dp) &&
4771 property == connector->dev->mode_config.scaling_mode_property) {
4772 if (val == DRM_MODE_SCALE_NONE) {
4773 DRM_DEBUG_KMS("no scaling not supported\n");
4774 return -EINVAL;
4775 }
4776
4777 if (intel_connector->panel.fitting_mode == val) {
4778 /* the eDP scaling property is not changed */
4779 return 0;
4780 }
4781 intel_connector->panel.fitting_mode = val;
4782
4783 goto done;
4784 }
4785
f684960e
CW
4786 return -EINVAL;
4787
4788done:
c0c36b94
CW
4789 if (intel_encoder->base.crtc)
4790 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4791
4792 return 0;
4793}
4794
a4fc5ed6 4795static void
73845adf 4796intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4797{
1d508706 4798 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4799
10e972d3 4800 kfree(intel_connector->detect_edid);
beb60608 4801
9cd300e0
JN
4802 if (!IS_ERR_OR_NULL(intel_connector->edid))
4803 kfree(intel_connector->edid);
4804
acd8db10
PZ
4805 /* Can't call is_edp() since the encoder may have been destroyed
4806 * already. */
4807 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4808 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4809
a4fc5ed6 4810 drm_connector_cleanup(connector);
55f78c43 4811 kfree(connector);
a4fc5ed6
KP
4812}
4813
00c09d70 4814void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4815{
da63a9f2
PZ
4816 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4817 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4818
4f71d0cb 4819 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4820 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4821 if (is_edp(intel_dp)) {
4822 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4823 /*
 4824 * vdd might still be enabled due to the delayed vdd off.
4825 * Make sure vdd is actually turned off here.
4826 */
773538e8 4827 pps_lock(intel_dp);
4be73780 4828 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4829 pps_unlock(intel_dp);
4830
01527b31
CT
4831 if (intel_dp->edp_notifier.notifier_call) {
4832 unregister_reboot_notifier(&intel_dp->edp_notifier);
4833 intel_dp->edp_notifier.notifier_call = NULL;
4834 }
bd943159 4835 }
c8bd0e49 4836 drm_encoder_cleanup(encoder);
da63a9f2 4837 kfree(intel_dig_port);
24d05927
DV
4838}
4839
07f9cd0b
ID
4840static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4841{
4842 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4843
4844 if (!is_edp(intel_dp))
4845 return;
4846
951468f3
VS
4847 /*
 4848 * vdd might still be enabled due to the delayed vdd off.
4849 * Make sure vdd is actually turned off here.
4850 */
afa4e53a 4851 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4852 pps_lock(intel_dp);
07f9cd0b 4853 edp_panel_vdd_off_sync(intel_dp);
773538e8 4854 pps_unlock(intel_dp);
07f9cd0b
ID
4855}
4856
49e6bc51
VS
4857static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4858{
4859 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4860 struct drm_device *dev = intel_dig_port->base.base.dev;
4861 struct drm_i915_private *dev_priv = dev->dev_private;
4862 enum intel_display_power_domain power_domain;
4863
4864 lockdep_assert_held(&dev_priv->pps_mutex);
4865
4866 if (!edp_have_panel_vdd(intel_dp))
4867 return;
4868
4869 /*
4870 * The VDD bit needs a power domain reference, so if the bit is
4871 * already enabled when we boot or resume, grab this reference and
4872 * schedule a vdd off, so we don't hold on to the reference
4873 * indefinitely.
4874 */
4875 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4876 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4877 intel_display_power_get(dev_priv, power_domain);
4878
4879 edp_panel_vdd_schedule_off(intel_dp);
4880}
4881
6d93c0c4
ID
4882static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4883{
49e6bc51
VS
4884 struct intel_dp *intel_dp;
4885
4886 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4887 return;
4888
4889 intel_dp = enc_to_intel_dp(encoder);
4890
4891 pps_lock(intel_dp);
4892
4893 /*
4894 * Read out the current power sequencer assignment,
4895 * in case the BIOS did something with it.
4896 */
4897 if (IS_VALLEYVIEW(encoder->dev))
4898 vlv_initial_power_sequencer_setup(intel_dp);
4899
4900 intel_edp_panel_vdd_sanitize(intel_dp);
4901
4902 pps_unlock(intel_dp);
6d93c0c4
ID
4903}
4904
a4fc5ed6 4905static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4906 .dpms = intel_connector_dpms,
a4fc5ed6 4907 .detect = intel_dp_detect,
beb60608 4908 .force = intel_dp_force,
a4fc5ed6 4909 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4910 .set_property = intel_dp_set_property,
2545e4a6 4911 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4912 .destroy = intel_dp_connector_destroy,
c6f95f27 4913 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4914 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4915};
4916
4917static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4918 .get_modes = intel_dp_get_modes,
4919 .mode_valid = intel_dp_mode_valid,
df0e9248 4920 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4921};
4922
a4fc5ed6 4923static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4924 .reset = intel_dp_encoder_reset,
24d05927 4925 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4926};
4927
b2c5c181 4928enum irqreturn
13cf5504
DA
4929intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4930{
4931 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4932 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4933 struct drm_device *dev = intel_dig_port->base.base.dev;
4934 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4935 enum intel_display_power_domain power_domain;
b2c5c181 4936 enum irqreturn ret = IRQ_NONE;
1c767b33 4937
0e32b39c
DA
4938 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4939 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4940
7a7f84cc
VS
4941 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4942 /*
4943 * vdd off can generate a long pulse on eDP which
4944 * would require vdd on to handle it, and thus we
4945 * would end up in an endless cycle of
4946 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4947 */
4948 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4949 port_name(intel_dig_port->port));
a8b3d52f 4950 return IRQ_HANDLED;
7a7f84cc
VS
4951 }
4952
26fbb774
VS
4953 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4954 port_name(intel_dig_port->port),
0e32b39c 4955 long_hpd ? "long" : "short");
13cf5504 4956
1c767b33
ID
4957 power_domain = intel_display_port_power_domain(intel_encoder);
4958 intel_display_power_get(dev_priv, power_domain);
4959
0e32b39c 4960 if (long_hpd) {
5fa836a9
MK
4961 /* indicate that we need to restart link training */
4962 intel_dp->train_set_valid = false;
2a592bec
DA
4963
4964 if (HAS_PCH_SPLIT(dev)) {
4965 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4966 goto mst_fail;
4967 } else {
4968 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4969 goto mst_fail;
4970 }
0e32b39c
DA
4971
4972 if (!intel_dp_get_dpcd(intel_dp)) {
4973 goto mst_fail;
4974 }
4975
4976 intel_dp_probe_oui(intel_dp);
4977
4978 if (!intel_dp_probe_mst(intel_dp))
4979 goto mst_fail;
4980
4981 } else {
4982 if (intel_dp->is_mst) {
1c767b33 4983 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4984 goto mst_fail;
4985 }
4986
4987 if (!intel_dp->is_mst) {
4988 /*
4989 * we'll check the link status via the normal hot plug path later -
4990 * but for short hpds we should check it now
4991 */
5b215bcf 4992 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4993 intel_dp_check_link_status(intel_dp);
5b215bcf 4994 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4995 }
4996 }
b2c5c181
DV
4997
4998 ret = IRQ_HANDLED;
4999
1c767b33 5000 goto put_power;
0e32b39c
DA
5001mst_fail:
5002 /* if we were in MST mode, and the device is not there, get out of MST mode */
5003 if (intel_dp->is_mst) {
5004 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5005 intel_dp->is_mst = false;
5006 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5007 }
1c767b33
ID
5008put_power:
5009 intel_display_power_put(dev_priv, power_domain);
5010
5011 return ret;
13cf5504
DA
5012}
5013
e3421a18
ZW
5014/* Return which DP Port should be selected for Transcoder DP control */
5015int
0206e353 5016intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5017{
5018 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5019 struct intel_encoder *intel_encoder;
5020 struct intel_dp *intel_dp;
e3421a18 5021
fa90ecef
PZ
5022 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5023 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5024
fa90ecef
PZ
5025 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5026 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5027 return intel_dp->output_reg;
e3421a18 5028 }
ea5b213a 5029
e3421a18
ZW
5030 return -1;
5031}
5032
36e83a18 5033/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5034bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5035{
5036 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5037 union child_device_config *p_child;
36e83a18 5038 int i;
5d8a7752
VS
5039 static const short port_mapping[] = {
5040 [PORT_B] = PORT_IDPB,
5041 [PORT_C] = PORT_IDPC,
5042 [PORT_D] = PORT_IDPD,
5043 };
36e83a18 5044
3b32a35b
VS
5045 if (port == PORT_A)
5046 return true;
5047
41aa3448 5048 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5049 return false;
5050
41aa3448
RV
5051 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5052 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5053
5d8a7752 5054 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5055 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5056 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5057 return true;
5058 }
5059 return false;
5060}
5061
0e32b39c 5062void
f684960e
CW
5063intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5064{
53b41837
YN
5065 struct intel_connector *intel_connector = to_intel_connector(connector);
5066
3f43c48d 5067 intel_attach_force_audio_property(connector);
e953fd7b 5068 intel_attach_broadcast_rgb_property(connector);
55bc60db 5069 intel_dp->color_range_auto = true;
53b41837
YN
5070
5071 if (is_edp(intel_dp)) {
5072 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5073 drm_object_attach_property(
5074 &connector->base,
53b41837 5075 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5076 DRM_MODE_SCALE_ASPECT);
5077 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5078 }
f684960e
CW
5079}
5080
dada1a9f
ID
5081static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5082{
5083 intel_dp->last_power_cycle = jiffies;
5084 intel_dp->last_power_on = jiffies;
5085 intel_dp->last_backlight_off = jiffies;
5086}
5087
67a54566
DV
5088static void
5089intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5090 struct intel_dp *intel_dp)
67a54566
DV
5091{
5092 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5093 struct edp_power_seq cur, vbt, spec,
5094 *final = &intel_dp->pps_delays;
67a54566 5095 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 5096 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5097
e39b999a
VS
5098 lockdep_assert_held(&dev_priv->pps_mutex);
5099
81ddbc69
VS
5100 /* already initialized? */
5101 if (final->t11_t12 != 0)
5102 return;
5103
453c5420 5104 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5105 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5106 pp_on_reg = PCH_PP_ON_DELAYS;
5107 pp_off_reg = PCH_PP_OFF_DELAYS;
5108 pp_div_reg = PCH_PP_DIVISOR;
5109 } else {
bf13e81b
JN
5110 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5111
5112 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5113 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5114 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5115 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5116 }
67a54566
DV
5117
5118 /* Workaround: Need to write PP_CONTROL with the unlock key as
5119 * the very first thing. */
453c5420 5120 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 5121 I915_WRITE(pp_ctrl_reg, pp);
67a54566 5122
453c5420
JB
5123 pp_on = I915_READ(pp_on_reg);
5124 pp_off = I915_READ(pp_off_reg);
5125 pp_div = I915_READ(pp_div_reg);
67a54566
DV
5126
5127 /* Pull timing values out of registers */
5128 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5129 PANEL_POWER_UP_DELAY_SHIFT;
5130
5131 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5132 PANEL_LIGHT_ON_DELAY_SHIFT;
5133
5134 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5135 PANEL_LIGHT_OFF_DELAY_SHIFT;
5136
5137 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5138 PANEL_POWER_DOWN_DELAY_SHIFT;
5139
5140 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5141 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5142
5143 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5144 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5145
41aa3448 5146 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5147
5148 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5149 * our hw here, which are all in 100usec. */
5150 spec.t1_t3 = 210 * 10;
5151 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5152 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5153 spec.t10 = 500 * 10;
5154 /* This one is special and actually in units of 100ms, but zero
5155 * based in the hw (so we need to add 100 ms). But the sw vbt
5156 * table multiplies it with 1000 to make it in units of 100usec,
5157 * too. */
5158 spec.t11_t12 = (510 + 100) * 10;
5159
5160 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5161 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5162
5163 /* Use the max of the register settings and vbt. If both are
5164 * unset, fall back to the spec limits. */
36b5f425 5165#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5166 spec.field : \
5167 max(cur.field, vbt.field))
5168 assign_final(t1_t3);
5169 assign_final(t8);
5170 assign_final(t9);
5171 assign_final(t10);
5172 assign_final(t11_t12);
5173#undef assign_final
5174
36b5f425 5175#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5176 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5177 intel_dp->backlight_on_delay = get_delay(t8);
5178 intel_dp->backlight_off_delay = get_delay(t9);
5179 intel_dp->panel_power_down_delay = get_delay(t10);
5180 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5181#undef get_delay
5182
f30d26e4
JN
5183 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5184 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5185 intel_dp->panel_power_cycle_delay);
5186
5187 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5188 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5189}
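/*
 * Worked example of the unit handling above (illustrative only, not used
 * by the driver): cur, vbt, spec and intel_dp->pps_delays are all kept in
 * units of 100 usec, while the intel_dp->*_delay fields are plain
 * milliseconds. The sketch below assumes nothing beyond what the code
 * above already does, i.e. get_delay() is DIV_ROUND_UP(value, 10).
 */
static inline int example_pps_100us_to_ms(int delay_100us)
{
	/* e.g. the spec fallback t1_t3 = 210 * 10 = 2100 -> 210 ms */
	return DIV_ROUND_UP(delay_100us, 10);
}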
5190
5191static void
5192intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5193 struct intel_dp *intel_dp)
f30d26e4
JN
5194{
5195 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5196 u32 pp_on, pp_off, pp_div, port_sel = 0;
5197 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5198 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 5199 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5200 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5201
e39b999a 5202 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
5203
5204 if (HAS_PCH_SPLIT(dev)) {
5205 pp_on_reg = PCH_PP_ON_DELAYS;
5206 pp_off_reg = PCH_PP_OFF_DELAYS;
5207 pp_div_reg = PCH_PP_DIVISOR;
5208 } else {
bf13e81b
JN
5209 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5210
5211 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5212 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5213 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5214 }
5215
b2f19d1a
PZ
5216 /*
5217 * And finally store the new values in the power sequencer. The
5218 * backlight delays are set to 1 because we do manual waits on them. For
5219 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5220 * we'll end up waiting for the backlight off delay twice: once when we
5221 * do the manual sleep, and once when we disable the panel and wait for
5222 * the PP_STATUS bit to become zero.
5223 */
f30d26e4 5224 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5225 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5226 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5227 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5228 /* Compute the divisor for the pp clock, simply match the Bspec
5229 * formula. */
453c5420 5230 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 5231 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
5232 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5233
5234 /* Haswell doesn't have any port selection bits for the panel
5235 * power sequencer any more. */
bc7d38a4 5236 if (IS_VALLEYVIEW(dev)) {
ad933b56 5237 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5238 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5239 if (port == PORT_A)
a24c144c 5240 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5241 else
a24c144c 5242 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5243 }
5244
453c5420
JB
5245 pp_on |= port_sel;
5246
5247 I915_WRITE(pp_on_reg, pp_on);
5248 I915_WRITE(pp_off_reg, pp_off);
5249 I915_WRITE(pp_div_reg, pp_div);
67a54566 5250
67a54566 5251 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5252 I915_READ(pp_on_reg),
5253 I915_READ(pp_off_reg),
5254 I915_READ(pp_div_reg));
f684960e
CW
5255}
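/*
 * Worked example for the PP_DIVISOR value programmed above (illustrative
 * only): t11_t12 is kept in 100 usec units while the hardware field is in
 * 100 ms units. With the spec fallback t11_t12 = (510 + 100) * 10 = 6100
 * (i.e. 610 ms), DIV_ROUND_UP(6100, 1000) = 7 is written to the
 * PANEL_POWER_CYCLE_DELAY field, so the hardware enforces a 700 ms panel
 * power cycle delay.
 */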
5256
b33a2815
VK
5257/**
5258 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5259 * @dev: DRM device
5260 * @refresh_rate: RR to be programmed
5261 *
5262 * This function gets called when refresh rate (RR) has to be changed from
5263 * one frequency to another. Switches can be between high and low RR
5264 * supported by the panel or to any other RR based on media playback (in
5265 * this case, RR value needs to be passed from user space).
5266 *
5267 * The caller of this function needs to take a lock on dev_priv->drrs.
5268 */
96178eeb 5269static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5270{
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct intel_encoder *encoder;
96178eeb
VK
5273 struct intel_digital_port *dig_port = NULL;
5274 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5275 struct intel_crtc_state *config = NULL;
439d7ac0 5276 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5277 u32 reg, val;
96178eeb 5278 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5279
5280 if (refresh_rate <= 0) {
5281 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5282 return;
5283 }
5284
96178eeb
VK
5285 if (intel_dp == NULL) {
5286 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5287 return;
5288 }
5289
1fcc9d1c 5290 /*
e4d59f6b
RV
5291 * FIXME: This needs proper synchronization with psr state for some
5292 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5293 */
439d7ac0 5294
96178eeb
VK
5295 dig_port = dp_to_dig_port(intel_dp);
5296 encoder = &dig_port->base;
723f9aab 5297 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5298
5299 if (!intel_crtc) {
5300 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5301 return;
5302 }
5303
6e3c9717 5304 config = intel_crtc->config;
439d7ac0 5305
96178eeb 5306 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5307 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5308 return;
5309 }
5310
96178eeb
VK
5311 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5312 refresh_rate)
439d7ac0
PB
5313 index = DRRS_LOW_RR;
5314
96178eeb 5315 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5316 DRM_DEBUG_KMS(
5317 "DRRS requested for previously set RR...ignoring\n");
5318 return;
5319 }
5320
5321 if (!intel_crtc->active) {
5322 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5323 return;
5324 }
5325
44395bfe 5326 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5327 switch (index) {
5328 case DRRS_HIGH_RR:
5329 intel_dp_set_m_n(intel_crtc, M1_N1);
5330 break;
5331 case DRRS_LOW_RR:
5332 intel_dp_set_m_n(intel_crtc, M2_N2);
5333 break;
5334 case DRRS_MAX_RR:
5335 default:
5336 DRM_ERROR("Unsupported refreshrate type\n");
5337 }
5338 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5339 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5340 val = I915_READ(reg);
a4c30b1d 5341
439d7ac0 5342 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5343 if (IS_VALLEYVIEW(dev))
5344 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5345 else
5346 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5347 } else {
6fa7aec1
VK
5348 if (IS_VALLEYVIEW(dev))
5349 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5350 else
5351 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5352 }
5353 I915_WRITE(reg, val);
5354 }
5355
4e9ac947
VK
5356 dev_priv->drrs.refresh_rate_type = index;
5357
5358 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5359}
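/*
 * Minimal usage sketch (illustrative only, not part of the driver):
 * dropping an eDP panel to its downclocked refresh rate. As documented
 * above, the caller must hold dev_priv->drrs.mutex, and DRRS must have
 * been enabled so that dev_priv->drrs.dp is non-NULL.
 */
static void example_drrs_enter_low_rr(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (intel_dp)
		/* e.g. fixed mode 60 Hz, downclock mode 40 Hz -> program 40 */
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}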
5360
b33a2815
VK
5361/**
5362 * intel_edp_drrs_enable - init drrs struct if supported
5363 * @intel_dp: DP struct
5364 *
5365 * Initializes frontbuffer_bits and drrs.dp
5366 */
c395578e
VK
5367void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5368{
5369 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5370 struct drm_i915_private *dev_priv = dev->dev_private;
5371 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5372 struct drm_crtc *crtc = dig_port->base.base.crtc;
5373 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5374
5375 if (!intel_crtc->config->has_drrs) {
5376 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5377 return;
5378 }
5379
5380 mutex_lock(&dev_priv->drrs.mutex);
5381 if (WARN_ON(dev_priv->drrs.dp)) {
5382 DRM_ERROR("DRRS already enabled\n");
5383 goto unlock;
5384 }
5385
5386 dev_priv->drrs.busy_frontbuffer_bits = 0;
5387
5388 dev_priv->drrs.dp = intel_dp;
5389
5390unlock:
5391 mutex_unlock(&dev_priv->drrs.mutex);
5392}
5393
b33a2815
VK
5394/**
5395 * intel_edp_drrs_disable - Disable DRRS
5396 * @intel_dp: DP struct
5397 *
5398 */
c395578e
VK
5399void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5400{
5401 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5402 struct drm_i915_private *dev_priv = dev->dev_private;
5403 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5404 struct drm_crtc *crtc = dig_port->base.base.crtc;
5405 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5406
5407 if (!intel_crtc->config->has_drrs)
5408 return;
5409
5410 mutex_lock(&dev_priv->drrs.mutex);
5411 if (!dev_priv->drrs.dp) {
5412 mutex_unlock(&dev_priv->drrs.mutex);
5413 return;
5414 }
5415
5416 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5417 intel_dp_set_drrs_state(dev_priv->dev,
5418 intel_dp->attached_connector->panel.
5419 fixed_mode->vrefresh);
5420
5421 dev_priv->drrs.dp = NULL;
5422 mutex_unlock(&dev_priv->drrs.mutex);
5423
5424 cancel_delayed_work_sync(&dev_priv->drrs.work);
5425}
5426
4e9ac947
VK
5427static void intel_edp_drrs_downclock_work(struct work_struct *work)
5428{
5429 struct drm_i915_private *dev_priv =
5430 container_of(work, typeof(*dev_priv), drrs.work.work);
5431 struct intel_dp *intel_dp;
5432
5433 mutex_lock(&dev_priv->drrs.mutex);
5434
5435 intel_dp = dev_priv->drrs.dp;
5436
5437 if (!intel_dp)
5438 goto unlock;
5439
439d7ac0 5440 /*
4e9ac947
VK
5441 * The delayed work can race with an invalidate, hence we need to
5442 * recheck.
439d7ac0
PB
5443 */
5444
4e9ac947
VK
5445 if (dev_priv->drrs.busy_frontbuffer_bits)
5446 goto unlock;
439d7ac0 5447
4e9ac947
VK
5448 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5449 intel_dp_set_drrs_state(dev_priv->dev,
5450 intel_dp->attached_connector->panel.
5451 downclock_mode->vrefresh);
439d7ac0 5452
4e9ac947 5453unlock:
4e9ac947 5454 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5455}
5456
b33a2815
VK
5457/**
5458 * intel_edp_drrs_invalidate - Invalidate DRRS
5459 * @dev: DRM device
5460 * @frontbuffer_bits: frontbuffer plane tracking bits
5461 *
5462 * When there is a disturbance on screen (due to cursor movement/time
5463 * update etc.), DRRS needs to be invalidated, i.e. we need to switch to
5464 * high RR.
5465 *
5466 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5467 */
a93fad0f
VK
5468void intel_edp_drrs_invalidate(struct drm_device *dev,
5469 unsigned frontbuffer_bits)
5470{
5471 struct drm_i915_private *dev_priv = dev->dev_private;
5472 struct drm_crtc *crtc;
5473 enum pipe pipe;
5474
9da7d693 5475 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5476 return;
5477
88f933a8 5478 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5479
a93fad0f 5480 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5481 if (!dev_priv->drrs.dp) {
5482 mutex_unlock(&dev_priv->drrs.mutex);
5483 return;
5484 }
5485
a93fad0f
VK
5486 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5487 pipe = to_intel_crtc(crtc)->pipe;
5488
5489 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5490 intel_dp_set_drrs_state(dev_priv->dev,
5491 dev_priv->drrs.dp->attached_connector->panel.
5492 fixed_mode->vrefresh);
5493 }
5494
5495 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5496
5497 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5498 mutex_unlock(&dev_priv->drrs.mutex);
5499}
5500
b33a2815
VK
5501/**
5502 * intel_edp_drrs_flush - Flush DRRS
5503 * @dev: DRM device
5504 * @frontbuffer_bits: frontbuffer plane tracking bits
5505 *
5506 * When there is no movement on screen, DRRS work can be scheduled.
5507 * This DRRS work is responsible for setting relevant registers after a
5508 * timeout of 1 second.
5509 *
5510 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5511 */
a93fad0f
VK
5512void intel_edp_drrs_flush(struct drm_device *dev,
5513 unsigned frontbuffer_bits)
5514{
5515 struct drm_i915_private *dev_priv = dev->dev_private;
5516 struct drm_crtc *crtc;
5517 enum pipe pipe;
5518
9da7d693 5519 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5520 return;
5521
88f933a8 5522 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5523
a93fad0f 5524 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5525 if (!dev_priv->drrs.dp) {
5526 mutex_unlock(&dev_priv->drrs.mutex);
5527 return;
5528 }
5529
a93fad0f
VK
5530 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5531 pipe = to_intel_crtc(crtc)->pipe;
5532 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5533
a93fad0f
VK
5534 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5535 !dev_priv->drrs.busy_frontbuffer_bits)
5536 schedule_delayed_work(&dev_priv->drrs.work,
5537 msecs_to_jiffies(1000));
5538 mutex_unlock(&dev_priv->drrs.mutex);
5539}
5540
b33a2815
VK
5541/**
5542 * DOC: Display Refresh Rate Switching (DRRS)
5543 *
5544 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5545 * which enables switching between low and high refresh rates
5546 * dynamically, based on the usage scenario. This feature is applicable
5547 * for internal panels.
5548 *
5549 * Indication that the panel supports DRRS is given by the panel EDID, which
5550 * would list multiple refresh rates for one resolution.
5551 *
5552 * DRRS is of 2 types - static and seamless.
5553 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5554 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5555 * Seamless DRRS involves changing RR without any visual effect to the user
5556 * and can be used during normal system usage. This is done by programming
5557 * certain registers.
5558 *
5559 * Support for static/seamless DRRS may be indicated in the VBT based on
5560 * inputs from the panel spec.
5561 *
5562 * DRRS saves power by switching to low RR based on usage scenarios.
5563 *
5564 * eDP DRRS:-
5565 * The implementation is based on frontbuffer tracking.
5566 * When there is a disturbance on the screen triggered by user activity or a
5567 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5568 * When there is no movement on screen, after a timeout of 1 second, a switch
5569 * to low RR is made.
5570 * For integration with frontbuffer tracking code,
5571 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5572 *
5573 * DRRS can be further extended to support other internal panels and also
5574 * the scenario of video playback wherein RR is set based on the rate
5575 * requested by userspace.
5576 */
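/*
 * Illustrative sketch of the frontbuffer tracking integration described
 * above (not part of this file; the real callers live in the frontbuffer
 * tracking code). The helper name and the way frontbuffer_bits are
 * derived here are assumptions made only for the example.
 */
static void example_drrs_frontbuffer_usage(struct drm_device *dev,
					   enum pipe pipe)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);

	/* Screen activity starts: switch back to the high refresh rate. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering finishes and the frontbuffer is flushed ... */

	/* No further activity: arm the 1 second downclock work. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}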
5577
5578/**
5579 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5580 * @intel_connector: eDP connector
5581 * @fixed_mode: preferred mode of panel
5582 *
5583 * This function is called only once at driver load to initialize basic
5584 * DRRS stuff.
5585 *
5586 * Returns:
5587 * Downclock mode if panel supports it, else return NULL.
5588 * DRRS support is determined by the presence of downclock mode (apart
5589 * from VBT setting).
5590 */
4f9db5b5 5591static struct drm_display_mode *
96178eeb
VK
5592intel_dp_drrs_init(struct intel_connector *intel_connector,
5593 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5594{
5595 struct drm_connector *connector = &intel_connector->base;
96178eeb 5596 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5597 struct drm_i915_private *dev_priv = dev->dev_private;
5598 struct drm_display_mode *downclock_mode = NULL;
5599
9da7d693
DV
5600 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5601 mutex_init(&dev_priv->drrs.mutex);
5602
4f9db5b5
PB
5603 if (INTEL_INFO(dev)->gen <= 6) {
5604 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5605 return NULL;
5606 }
5607
5608 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5609 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5610 return NULL;
5611 }
5612
5613 downclock_mode = intel_find_panel_downclock
5614 (dev, fixed_mode, connector);
5615
5616 if (!downclock_mode) {
a1d26342 5617 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5618 return NULL;
5619 }
5620
96178eeb 5621 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5622
96178eeb 5623 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5624 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5625 return downclock_mode;
5626}
5627
ed92f0b2 5628static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5629 struct intel_connector *intel_connector)
ed92f0b2
PZ
5630{
5631 struct drm_connector *connector = &intel_connector->base;
5632 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5633 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5634 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5635 struct drm_i915_private *dev_priv = dev->dev_private;
5636 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5637 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5638 bool has_dpcd;
5639 struct drm_display_mode *scan;
5640 struct edid *edid;
6517d273 5641 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5642
5643 if (!is_edp(intel_dp))
5644 return true;
5645
49e6bc51
VS
5646 pps_lock(intel_dp);
5647 intel_edp_panel_vdd_sanitize(intel_dp);
5648 pps_unlock(intel_dp);
63635217 5649
ed92f0b2 5650 /* Cache DPCD and EDID for edp. */
ed92f0b2 5651 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5652
5653 if (has_dpcd) {
5654 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5655 dev_priv->no_aux_handshake =
5656 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5657 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5658 } else {
5659 /* if this fails, presume the device is a ghost */
5660 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5661 return false;
5662 }
5663
5664 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5665 pps_lock(intel_dp);
36b5f425 5666 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5667 pps_unlock(intel_dp);
ed92f0b2 5668
060c8778 5669 mutex_lock(&dev->mode_config.mutex);
0b99836f 5670 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5671 if (edid) {
5672 if (drm_add_edid_modes(connector, edid)) {
5673 drm_mode_connector_update_edid_property(connector,
5674 edid);
5675 drm_edid_to_eld(connector, edid);
5676 } else {
5677 kfree(edid);
5678 edid = ERR_PTR(-EINVAL);
5679 }
5680 } else {
5681 edid = ERR_PTR(-ENOENT);
5682 }
5683 intel_connector->edid = edid;
5684
5685 /* prefer fixed mode from EDID if available */
5686 list_for_each_entry(scan, &connector->probed_modes, head) {
5687 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5688 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5689 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5690 intel_connector, fixed_mode);
ed92f0b2
PZ
5691 break;
5692 }
5693 }
5694
5695 /* fallback to VBT if available for eDP */
5696 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5697 fixed_mode = drm_mode_duplicate(dev,
5698 dev_priv->vbt.lfp_lvds_vbt_mode);
5699 if (fixed_mode)
5700 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5701 }
060c8778 5702 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5703
01527b31
CT
5704 if (IS_VALLEYVIEW(dev)) {
5705 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5706 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5707
5708 /*
5709 * Figure out the current pipe for the initial backlight setup.
5710 * If the current pipe isn't valid, try the PPS pipe, and if that
5711 * fails just assume pipe A.
5712 */
5713 if (IS_CHERRYVIEW(dev))
5714 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5715 else
5716 pipe = PORT_TO_PIPE(intel_dp->DP);
5717
5718 if (pipe != PIPE_A && pipe != PIPE_B)
5719 pipe = intel_dp->pps_pipe;
5720
5721 if (pipe != PIPE_A && pipe != PIPE_B)
5722 pipe = PIPE_A;
5723
5724 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5725 pipe_name(pipe));
01527b31
CT
5726 }
5727
4f9db5b5 5728 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5729 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5730 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5731
5732 return true;
5733}
5734
16c25533 5735bool
f0fec3f2
PZ
5736intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5737 struct intel_connector *intel_connector)
a4fc5ed6 5738{
f0fec3f2
PZ
5739 struct drm_connector *connector = &intel_connector->base;
5740 struct intel_dp *intel_dp = &intel_dig_port->dp;
5741 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5742 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5743 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5744 enum port port = intel_dig_port->port;
0b99836f 5745 int type;
a4fc5ed6 5746
a4a5d2f8
VS
5747 intel_dp->pps_pipe = INVALID_PIPE;
5748
ec5b01dd 5749 /* intel_dp vfuncs */
b6b5e383
DL
5750 if (INTEL_INFO(dev)->gen >= 9)
5751 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5752 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5753 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5754 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5755 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5756 else if (HAS_PCH_SPLIT(dev))
5757 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5758 else
5759 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5760
b9ca5fad
DL
5761 if (INTEL_INFO(dev)->gen >= 9)
5762 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5763 else
5764 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5765
0767935e
DV
5766 /* Preserve the current hw state. */
5767 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5768 intel_dp->attached_connector = intel_connector;
3d3dc149 5769
3b32a35b 5770 if (intel_dp_is_edp(dev, port))
b329530c 5771 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5772 else
5773 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5774
f7d24902
ID
5775 /*
5776 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5777 * for DP the encoder type can be set by the caller to
5778 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5779 */
5780 if (type == DRM_MODE_CONNECTOR_eDP)
5781 intel_encoder->type = INTEL_OUTPUT_EDP;
5782
c17ed5b5
VS
5783 /* eDP only on port B and/or C on vlv/chv */
5784 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5785 port != PORT_B && port != PORT_C))
5786 return false;
5787
e7281eab
ID
5788 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5789 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5790 port_name(port));
5791
b329530c 5792 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5793 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5794
a4fc5ed6
KP
5795 connector->interlace_allowed = true;
5796 connector->doublescan_allowed = 0;
5797
f0fec3f2 5798 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5799 edp_panel_vdd_work);
a4fc5ed6 5800
df0e9248 5801 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5802 drm_connector_register(connector);
a4fc5ed6 5803
affa9354 5804 if (HAS_DDI(dev))
bcbc889b
PZ
5805 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5806 else
5807 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5808 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5809
0b99836f 5810 /* Set up the hotplug pin. */
ab9d7c30
PZ
5811 switch (port) {
5812 case PORT_A:
1d843f9d 5813 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5814 break;
5815 case PORT_B:
1d843f9d 5816 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5817 break;
5818 case PORT_C:
1d843f9d 5819 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5820 break;
5821 case PORT_D:
1d843f9d 5822 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5823 break;
5824 default:
ad1c0b19 5825 BUG();
5eb08b69
ZW
5826 }
5827
dada1a9f 5828 if (is_edp(intel_dp)) {
773538e8 5829 pps_lock(intel_dp);
1e74a324
VS
5830 intel_dp_init_panel_power_timestamps(intel_dp);
5831 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5832 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5833 else
36b5f425 5834 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5835 pps_unlock(intel_dp);
dada1a9f 5836 }
0095e6dc 5837
9d1a1031 5838 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5839
0e32b39c 5840 /* init MST on ports that can support it */
0c9b3715
JN
5841 if (HAS_DP_MST(dev) &&
5842 (port == PORT_B || port == PORT_C || port == PORT_D))
5843 intel_dp_mst_encoder_init(intel_dig_port,
5844 intel_connector->base.base.id);
0e32b39c 5845
36b5f425 5846 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5847 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5848 if (is_edp(intel_dp)) {
5849 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5850 /*
5851 * vdd might still be enabled due to the delayed vdd off.
5852 * Make sure vdd is actually turned off here.
5853 */
773538e8 5854 pps_lock(intel_dp);
4be73780 5855 edp_panel_vdd_off_sync(intel_dp);
773538e8 5856 pps_unlock(intel_dp);
15b1d171 5857 }
34ea3d38 5858 drm_connector_unregister(connector);
b2f246a8 5859 drm_connector_cleanup(connector);
16c25533 5860 return false;
b2f246a8 5861 }
32f9d658 5862
f684960e
CW
5863 intel_dp_add_properties(intel_dp, connector);
5864
a4fc5ed6
KP
5865 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5866 * 0xd. Failure to do so will result in spurious interrupts being
5867 * generated on the port when a cable is not attached.
5868 */
5869 if (IS_G4X(dev) && !IS_GM45(dev)) {
5870 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5871 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5872 }
16c25533 5873
aa7471d2
JN
5874 i915_debugfs_connector_add(connector);
5875
16c25533 5876 return true;
a4fc5ed6 5877}
f0fec3f2
PZ
5878
5879void
5880intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5881{
13cf5504 5882 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5883 struct intel_digital_port *intel_dig_port;
5884 struct intel_encoder *intel_encoder;
5885 struct drm_encoder *encoder;
5886 struct intel_connector *intel_connector;
5887
b14c5679 5888 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5889 if (!intel_dig_port)
5890 return;
5891
08d9bc92 5892 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5893 if (!intel_connector) {
5894 kfree(intel_dig_port);
5895 return;
5896 }
5897
5898 intel_encoder = &intel_dig_port->base;
5899 encoder = &intel_encoder->base;
5900
5901 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5902 DRM_MODE_ENCODER_TMDS);
5903
5bfe2ac0 5904 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5905 intel_encoder->disable = intel_disable_dp;
00c09d70 5906 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5907 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5908 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5909 if (IS_CHERRYVIEW(dev)) {
9197c88b 5910 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5911 intel_encoder->pre_enable = chv_pre_enable_dp;
5912 intel_encoder->enable = vlv_enable_dp;
580d3811 5913 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5914 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5915 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5916 intel_encoder->pre_enable = vlv_pre_enable_dp;
5917 intel_encoder->enable = vlv_enable_dp;
49277c31 5918 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5919 } else {
ecff4f3b
JN
5920 intel_encoder->pre_enable = g4x_pre_enable_dp;
5921 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5922 if (INTEL_INFO(dev)->gen >= 5)
5923 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5924 }
f0fec3f2 5925
174edf1f 5926 intel_dig_port->port = port;
f0fec3f2
PZ
5927 intel_dig_port->dp.output_reg = output_reg;
5928
00c09d70 5929 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5930 if (IS_CHERRYVIEW(dev)) {
5931 if (port == PORT_D)
5932 intel_encoder->crtc_mask = 1 << 2;
5933 else
5934 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5935 } else {
5936 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5937 }
bc079e8b 5938 intel_encoder->cloneable = 0;
f0fec3f2 5939
13cf5504 5940 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5941 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5942
15b1d171
PZ
5943 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5944 drm_encoder_cleanup(encoder);
5945 kfree(intel_dig_port);
b2f246a8 5946 kfree(intel_connector);
15b1d171 5947 }
f0fec3f2 5948}
0e32b39c
DA
5949
5950void intel_dp_mst_suspend(struct drm_device *dev)
5951{
5952 struct drm_i915_private *dev_priv = dev->dev_private;
5953 int i;
5954
5955 /* disable MST */
5956 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5957 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5958 if (!intel_dig_port)
5959 continue;
5960
5961 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5962 if (!intel_dig_port->dp.can_mst)
5963 continue;
5964 if (intel_dig_port->dp.is_mst)
5965 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5966 }
5967 }
5968}
5969
5970void intel_dp_mst_resume(struct drm_device *dev)
5971{
5972 struct drm_i915_private *dev_priv = dev->dev_private;
5973 int i;
5974
5975 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5976 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5977 if (!intel_dig_port)
5978 continue;
5979 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5980 int ret;
5981
5982 if (!intel_dig_port->dp.can_mst)
5983 continue;
5984
5985 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5986 if (ret != 0) {
5987 intel_dp_check_mst_status(&intel_dig_port->dp);
5988 }
5989 }
5990 }
5991}