drm/i915: Use intel_dp->DP in eDP PLL setup
[linux-2.6-block.git] / drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf 50struct dp_link_dpll {
840b32b7 51 int clock;
9dd4ffdf
CML
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
840b32b7 56 { 162000,
9dd4ffdf 57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 58 { 270000,
9dd4ffdf
CML
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
840b32b7 63 { 162000,
9dd4ffdf 64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 65 { 270000,
9dd4ffdf
CML
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5 69static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 70 { 162000,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 72 { 270000,
65ce4bf5
CML
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4, which allows additional link rates.
78 * The table below lists only the fixed rates, not the variable ones.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional divider for m2.
83 * m2 is stored in fixed point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
840b32b7 86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 88 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
840b32b7 90 { 540000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8
CML
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
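/*
 * For illustration, decoding the first entry above: 0x819999a >> 22 == 32
 * (m2_int) and 0x819999a & ((1 << 22) - 1) == 1677722 (m2_fraction),
 * i.e. an effective m2 of roughly 32.4.
 */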
637a9c63 93
64987fc5
SJ
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15
VS
97 324000, 432000, 540000 };
98static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
e0fce78f
VS
133static unsigned int intel_dp_unused_lane_mask(int lane_count)
134{
135 return ~((1 << lane_count) - 1) & 0xf;
136}
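/*
 * e.g. lane_count == 2 gives ~0x3 & 0xf == 0xc, i.e. lanes 2 and 3 are
 * reported as unused.
 */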
137
ed4e9c1d
VS
138static int
139intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 140{
7183dc29 141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
1db10e28 146 case DP_LINK_BW_5_4:
d4eead50 147 break;
a4fc5ed6 148 default:
d4eead50
ID
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
a4fc5ed6
KP
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155}
156
eeb6324d
PZ
157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158{
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 struct drm_device *dev = intel_dig_port->base.base.dev;
161 u8 source_max, sink_max;
162
163 source_max = 4;
164 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166 source_max = 2;
167
168 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
169
170 return min(source_max, sink_max);
171}
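/*
 * e.g. a DDI port A wired without DDI_A_4_LANES caps source_max at 2, so
 * even a sink advertising 4 lanes ends up with min(2, 4) == 2 lanes.
 */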
172
cd9dde44
AJ
173/*
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
176 *
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178 *
179 * 270000 * 1 * 8 / 10 == 216000
180 *
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
185 *
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
188 */
189
a4fc5ed6 190static int
c898261c 191intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 192{
cd9dde44 193 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
194}
195
fe27d53e
DA
196static int
197intel_dp_max_data_rate(int max_link_clock, int max_lanes)
198{
199 return (max_link_clock * max_lanes * 8) / 10;
200}
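/*
 * Worked example in the units described above: 1680x1050R at 119000 kHz
 * and 18 bpp needs (119000 * 18 + 9) / 10 == 214200, while a single
 * 2.7GHz lane provides (270000 * 1 * 8) / 10 == 216000, so the mode fits.
 */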
201
c19de8eb 202static enum drm_mode_status
a4fc5ed6
KP
203intel_dp_mode_valid(struct drm_connector *connector,
204 struct drm_display_mode *mode)
205{
df0e9248 206 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
207 struct intel_connector *intel_connector = to_intel_connector(connector);
208 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
209 int target_clock = mode->clock;
210 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 211
dd06f90e
JN
212 if (is_edp(intel_dp) && fixed_mode) {
213 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
214 return MODE_PANEL;
215
dd06f90e 216 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 217 return MODE_PANEL;
03afc4a2
DV
218
219 target_clock = fixed_mode->clock;
7de56f43
ZY
220 }
221
50fec21a 222 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 223 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
224
225 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226 mode_rate = intel_dp_link_required(target_clock, 18);
227
228 if (mode_rate > max_rate)
c4867936 229 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
230
231 if (mode->clock < 10000)
232 return MODE_CLOCK_LOW;
233
0af78a2b
DV
234 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235 return MODE_H_ILLEGAL;
236
a4fc5ed6
KP
237 return MODE_OK;
238}
239
a4f1289e 240uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
241{
242 int i;
243 uint32_t v = 0;
244
245 if (src_bytes > 4)
246 src_bytes = 4;
247 for (i = 0; i < src_bytes; i++)
248 v |= ((uint32_t) src[i]) << ((3-i) * 8);
249 return v;
250}
251
c2af70e2 252static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
253{
254 int i;
255 if (dst_bytes > 4)
256 dst_bytes = 4;
257 for (i = 0; i < dst_bytes; i++)
258 dst[i] = src >> ((3-i) * 8);
259}
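/*
 * e.g. packing the two bytes { 0x12, 0x34 } yields 0x12340000, and
 * unpacking that value into two bytes returns the original { 0x12, 0x34 }.
 */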
260
bf13e81b
JN
261static void
262intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 263 struct intel_dp *intel_dp);
bf13e81b
JN
264static void
265intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 266 struct intel_dp *intel_dp);
bf13e81b 267
773538e8
VS
268static void pps_lock(struct intel_dp *intel_dp)
269{
270 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
271 struct intel_encoder *encoder = &intel_dig_port->base;
272 struct drm_device *dev = encoder->base.dev;
273 struct drm_i915_private *dev_priv = dev->dev_private;
274 enum intel_display_power_domain power_domain;
275
276 /*
277 * See vlv_power_sequencer_reset() for why we need
278 * a power domain reference here.
279 */
280 power_domain = intel_display_port_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain);
282
283 mutex_lock(&dev_priv->pps_mutex);
284}
285
286static void pps_unlock(struct intel_dp *intel_dp)
287{
288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
289 struct intel_encoder *encoder = &intel_dig_port->base;
290 struct drm_device *dev = encoder->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private;
292 enum intel_display_power_domain power_domain;
293
294 mutex_unlock(&dev_priv->pps_mutex);
295
296 power_domain = intel_display_port_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain);
298}
299
961a0db0
VS
300static void
301vlv_power_sequencer_kick(struct intel_dp *intel_dp)
302{
303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304 struct drm_device *dev = intel_dig_port->base.base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum pipe pipe = intel_dp->pps_pipe;
0047eedc
VS
307 bool pll_enabled, release_cl_override = false;
308 enum dpio_phy phy = DPIO_PHY(pipe);
309 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
961a0db0
VS
310 uint32_t DP;
311
312 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe), port_name(intel_dig_port->port)))
315 return;
316
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe), port_name(intel_dig_port->port));
319
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
322 */
323 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325 DP |= DP_PORT_WIDTH(1);
326 DP |= DP_LINK_TRAIN_PAT_1;
327
328 if (IS_CHERRYVIEW(dev))
329 DP |= DP_PIPE_SELECT_CHV(pipe);
330 else if (pipe == PIPE_B)
331 DP |= DP_PIPEB_SELECT;
332
d288f65f
VS
333 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
334
335 /*
336 * The DPLL for the pipe must be enabled for this to work.
337 * So enable it temporarily if it's not already enabled.
338 */
0047eedc
VS
339 if (!pll_enabled) {
340 release_cl_override = IS_CHERRYVIEW(dev) &&
341 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
d288f65f
VS
343 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
0047eedc 345 }
d288f65f 346
961a0db0
VS
347 /*
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power seqeuencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
352 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
d288f65f 361
0047eedc 362 if (!pll_enabled) {
d288f65f 363 vlv_force_pll_off(dev, pipe);
0047eedc
VS
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
961a0db0
VS
368}
369
bf13e81b
JN
370static enum pipe
371vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372{
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 378 enum pipe pipe;
bf13e81b 379
e39b999a 380 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 381
a8c3344e
VS
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
a4a5d2f8
VS
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
388 /*
389 * We don't have a power sequencer assigned to this port yet.
390 * Pick one that's not used by other ports.
391 */
392 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
393 base.head) {
394 struct intel_dp *tmp;
395
396 if (encoder->type != INTEL_OUTPUT_EDP)
397 continue;
398
399 tmp = enc_to_intel_dp(&encoder->base);
400
401 if (tmp->pps_pipe != INVALID_PIPE)
402 pipes &= ~(1 << tmp->pps_pipe);
403 }
404
405 /*
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
408 */
409 if (WARN_ON(pipes == 0))
a8c3344e
VS
410 pipe = PIPE_A;
411 else
412 pipe = ffs(pipes) - 1;
a4a5d2f8 413
a8c3344e
VS
414 vlv_steal_power_sequencer(dev, pipe);
415 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
416
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp->pps_pipe),
419 port_name(intel_dig_port->port));
420
421 /* init power sequencer on this pipe and port */
36b5f425
VS
422 intel_dp_init_panel_power_sequencer(dev, intel_dp);
423 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 424
961a0db0
VS
425 /*
426 * Even vdd force doesn't work until we've made
427 * the power sequencer lock in on the port.
428 */
429 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
430
431 return intel_dp->pps_pipe;
432}
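/*
 * Selection sketch: start from the mask (1 << PIPE_A) | (1 << PIPE_B),
 * clear every pipe already claimed by another eDP port, then take the
 * lowest remaining bit via ffs() - 1; e.g. with pipe A already taken,
 * ffs(0x2) - 1 picks PIPE_B.
 */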
433
6491ab27
VS
434typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
435 enum pipe pipe);
436
437static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
438 enum pipe pipe)
439{
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
441}
442
443static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
444 enum pipe pipe)
445{
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
447}
448
449static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
450 enum pipe pipe)
451{
452 return true;
453}
bf13e81b 454
a4a5d2f8 455static enum pipe
6491ab27
VS
456vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
457 enum port port,
458 vlv_pipe_check pipe_check)
a4a5d2f8
VS
459{
460 enum pipe pipe;
bf13e81b 461
bf13e81b
JN
462 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
463 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
464 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
465
466 if (port_sel != PANEL_PORT_SELECT_VLV(port))
467 continue;
468
6491ab27
VS
469 if (!pipe_check(dev_priv, pipe))
470 continue;
471
a4a5d2f8 472 return pipe;
bf13e81b
JN
473 }
474
a4a5d2f8
VS
475 return INVALID_PIPE;
476}
477
478static void
479vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480{
481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482 struct drm_device *dev = intel_dig_port->base.base.dev;
483 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
484 enum port port = intel_dig_port->port;
485
486 lockdep_assert_held(&dev_priv->pps_mutex);
487
488 /* try to find a pipe with this port selected */
6491ab27
VS
489 /* first pick one where the panel is on */
490 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
491 vlv_pipe_has_pp_on);
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp->pps_pipe == INVALID_PIPE)
494 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495 vlv_pipe_has_vdd_on);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp->pps_pipe == INVALID_PIPE)
498 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
499 vlv_pipe_any);
a4a5d2f8
VS
500
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp->pps_pipe == INVALID_PIPE) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
504 port_name(port));
505 return;
bf13e81b
JN
506 }
507
a4a5d2f8
VS
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port), pipe_name(intel_dp->pps_pipe));
510
36b5f425
VS
511 intel_dp_init_panel_power_sequencer(dev, intel_dp);
512 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
513}
514
773538e8
VS
515void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
516{
517 struct drm_device *dev = dev_priv->dev;
518 struct intel_encoder *encoder;
519
520 if (WARN_ON(!IS_VALLEYVIEW(dev)))
521 return;
522
523 /*
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so they
530 * should always be used.
531 */
532
533 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
534 struct intel_dp *intel_dp;
535
536 if (encoder->type != INTEL_OUTPUT_EDP)
537 continue;
538
539 intel_dp = enc_to_intel_dp(&encoder->base);
540 intel_dp->pps_pipe = INVALID_PIPE;
541 }
bf13e81b
JN
542}
543
544static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
545{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
b0a08bec
VK
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554}
555
556static u32 _pp_stat_reg(struct intel_dp *intel_dp)
557{
558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
559
b0a08bec
VK
560 if (IS_BROXTON(dev))
561 return BXT_PP_STATUS(0);
562 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
563 return PCH_PP_STATUS;
564 else
565 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
566}
567
01527b31
CT
568/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
569 This function is only applicable when panel PM state is not being tracked. */
570static int edp_notify_handler(struct notifier_block *this, unsigned long code,
571 void *unused)
572{
573 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
574 edp_notifier);
575 struct drm_device *dev = intel_dp_to_dev(intel_dp);
576 struct drm_i915_private *dev_priv = dev->dev_private;
01527b31
CT
577
578 if (!is_edp(intel_dp) || code != SYS_RESTART)
579 return 0;
580
773538e8 581 pps_lock(intel_dp);
e39b999a 582
01527b31 583 if (IS_VALLEYVIEW(dev)) {
e39b999a 584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
649636ef
VS
585 u32 pp_ctrl_reg, pp_div_reg;
586 u32 pp_div;
e39b999a 587
01527b31
CT
588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
589 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
590 pp_div = I915_READ(pp_div_reg);
591 pp_div &= PP_REFERENCE_DIVIDER_MASK;
592
593 /* 0x1F write to PP_DIV_REG sets max cycle delay */
594 I915_WRITE(pp_div_reg, pp_div | 0x1F);
595 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
596 msleep(intel_dp->panel_power_cycle_delay);
597 }
598
773538e8 599 pps_unlock(intel_dp);
e39b999a 600
01527b31
CT
601 return 0;
602}
603
4be73780 604static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 605{
30add22d 606 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
607 struct drm_i915_private *dev_priv = dev->dev_private;
608
e39b999a
VS
609 lockdep_assert_held(&dev_priv->pps_mutex);
610
9a42356b
VS
611 if (IS_VALLEYVIEW(dev) &&
612 intel_dp->pps_pipe == INVALID_PIPE)
613 return false;
614
bf13e81b 615 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
616}
617
4be73780 618static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 619{
30add22d 620 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
621 struct drm_i915_private *dev_priv = dev->dev_private;
622
e39b999a
VS
623 lockdep_assert_held(&dev_priv->pps_mutex);
624
9a42356b
VS
625 if (IS_VALLEYVIEW(dev) &&
626 intel_dp->pps_pipe == INVALID_PIPE)
627 return false;
628
773538e8 629 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
630}
631
9b984dae
KP
632static void
633intel_dp_check_edp(struct intel_dp *intel_dp)
634{
30add22d 635 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 636 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 637
9b984dae
KP
638 if (!is_edp(intel_dp))
639 return;
453c5420 640
4be73780 641 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
642 WARN(1, "eDP powered off while attempting aux channel communication.\n");
643 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
644 I915_READ(_pp_stat_reg(intel_dp)),
645 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
646 }
647}
648
9ee32fea
DV
649static uint32_t
650intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
651{
652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653 struct drm_device *dev = intel_dig_port->base.base.dev;
654 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 655 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
656 uint32_t status;
657 bool done;
658
ef04f00d 659#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 660 if (has_aux_irq)
b18ac466 661 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 662 msecs_to_jiffies_timeout(10));
9ee32fea
DV
663 else
664 done = wait_for_atomic(C, 10) == 0;
665 if (!done)
666 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
667 has_aux_irq);
668#undef C
669
670 return status;
671}
672
ec5b01dd 673static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 674{
174edf1f
PZ
675 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 677
ec5b01dd
DL
678 /*
679 * The clock divider is based on hrawclk, and the AUX clock should run
680 * at 2MHz. So take the hrawclk value, divide by 2 and use that.
a4fc5ed6 681 */
ec5b01dd
DL
682 return index ? 0 : intel_hrawclk(dev) / 2;
683}
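/*
 * Rough numbers, assuming intel_hrawclk() reports MHz: a 200 MHz raw
 * clock yields a divider of 100, i.e. 200 MHz / 100 = the desired 2 MHz.
 */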
684
685static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686{
687 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 689 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
690
691 if (index)
692 return 0;
693
694 if (intel_dig_port->port == PORT_A) {
05024da3
VS
695 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
696
ec5b01dd
DL
697 } else {
698 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
699 }
700}
701
702static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703{
704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705 struct drm_device *dev = intel_dig_port->base.base.dev;
706 struct drm_i915_private *dev_priv = dev->dev_private;
707
708 if (intel_dig_port->port == PORT_A) {
709 if (index)
710 return 0;
05024da3 711 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
2c55c336
JN
712 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
713 /* Workaround for non-ULT HSW */
bc86625a
CW
714 switch (index) {
715 case 0: return 63;
716 case 1: return 72;
717 default: return 0;
718 }
ec5b01dd 719 } else {
bc86625a 720 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 721 }
b84a1cf8
RV
722}
723
ec5b01dd
DL
724static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
725{
726 return index ? 0 : 100;
727}
728
b6b5e383
DL
729static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730{
731 /*
732 * SKL doesn't need us to program the AUX clock divider (Hardware will
733 * derive the clock from CDCLK automatically). We still implement the
734 * get_aux_clock_divider vfunc to plug into the existing code.
735 */
736 return index ? 0 : 1;
737}
738
5ed12a19
DL
739static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740 bool has_aux_irq,
741 int send_bytes,
742 uint32_t aux_clock_divider)
743{
744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745 struct drm_device *dev = intel_dig_port->base.base.dev;
746 uint32_t precharge, timeout;
747
748 if (IS_GEN6(dev))
749 precharge = 3;
750 else
751 precharge = 5;
752
753 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755 else
756 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
758 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 759 DP_AUX_CH_CTL_DONE |
5ed12a19 760 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 761 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 762 timeout |
788d4433 763 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
764 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 766 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
767}
768
b9ca5fad
DL
769static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
770 bool has_aux_irq,
771 int send_bytes,
772 uint32_t unused)
773{
774 return DP_AUX_CH_CTL_SEND_BUSY |
775 DP_AUX_CH_CTL_DONE |
776 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_TIME_OUT_1600us |
779 DP_AUX_CH_CTL_RECEIVE_ERROR |
780 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
782}
783
b84a1cf8
RV
784static int
785intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 786 const uint8_t *send, int send_bytes,
b84a1cf8
RV
787 uint8_t *recv, int recv_size)
788{
789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790 struct drm_device *dev = intel_dig_port->base.base.dev;
791 struct drm_i915_private *dev_priv = dev->dev_private;
792 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
793 uint32_t ch_data = ch_ctl + 4;
bc86625a 794 uint32_t aux_clock_divider;
b84a1cf8
RV
795 int i, ret, recv_bytes;
796 uint32_t status;
5ed12a19 797 int try, clock = 0;
4e6b788c 798 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
799 bool vdd;
800
773538e8 801 pps_lock(intel_dp);
e39b999a 802
72c3500a
VS
803 /*
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
807 * ourselves.
808 */
1e0560e0 809 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
810
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
813 * deep sleep states.
814 */
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
816
817 intel_dp_check_edp(intel_dp);
5eb08b69 818
c67a470b
PZ
819 intel_aux_display_runtime_get(dev_priv);
820
11bee43e
JB
821 /* Try to wait for any previous AUX channel activity */
822 for (try = 0; try < 3; try++) {
ef04f00d 823 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
824 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
825 break;
826 msleep(1);
827 }
828
829 if (try == 3) {
02196c77
MK
830 static u32 last_status = -1;
831 const u32 status = I915_READ(ch_ctl);
832
833 if (status != last_status) {
834 WARN(1, "dp_aux_ch not started status 0x%08x\n",
835 status);
836 last_status = status;
837 }
838
9ee32fea
DV
839 ret = -EBUSY;
840 goto out;
4f7f7b7e
CW
841 }
842
46a5ae9f
PZ
843 /* Only 5 data registers! */
844 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
845 ret = -E2BIG;
846 goto out;
847 }
848
ec5b01dd 849 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
850 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
851 has_aux_irq,
852 send_bytes,
853 aux_clock_divider);
5ed12a19 854
bc86625a
CW
855 /* Must try at least 3 times according to DP spec */
856 for (try = 0; try < 5; try++) {
857 /* Load the send data into the aux channel data registers */
858 for (i = 0; i < send_bytes; i += 4)
859 I915_WRITE(ch_data + i,
a4f1289e
RV
860 intel_dp_pack_aux(send + i,
861 send_bytes - i));
bc86625a
CW
862
863 /* Send the command and wait for it to complete */
5ed12a19 864 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
865
866 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
867
868 /* Clear done status and any errors */
869 I915_WRITE(ch_ctl,
870 status |
871 DP_AUX_CH_CTL_DONE |
872 DP_AUX_CH_CTL_TIME_OUT_ERROR |
873 DP_AUX_CH_CTL_RECEIVE_ERROR);
874
74ebf294 875 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 876 continue;
74ebf294
TP
877
878 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
879 * 400us delay required for errors and timeouts
880 * Timeout errors from the HW already meet this
881 * requirement so skip to next iteration
882 */
883 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
884 usleep_range(400, 500);
bc86625a 885 continue;
74ebf294 886 }
bc86625a 887 if (status & DP_AUX_CH_CTL_DONE)
e058c945 888 goto done;
bc86625a 889 }
a4fc5ed6
KP
890 }
891
a4fc5ed6 892 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 893 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
894 ret = -EBUSY;
895 goto out;
a4fc5ed6
KP
896 }
897
e058c945 898done:
a4fc5ed6
KP
899 /* Check for timeout or receive error.
900 * Timeouts occur when the sink is not connected
901 */
a5b3da54 902 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 903 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
904 ret = -EIO;
905 goto out;
a5b3da54 906 }
1ae8c0a5
KP
907
908 /* Timeouts occur when the device isn't connected, so they're
909 * "normal" -- don't fill the kernel log with these */
a5b3da54 910 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 911 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
912 ret = -ETIMEDOUT;
913 goto out;
a4fc5ed6
KP
914 }
915
916 /* Unload any bytes sent back from the other side */
917 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
918 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
919 if (recv_bytes > recv_size)
920 recv_bytes = recv_size;
0206e353 921
4f7f7b7e 922 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
923 intel_dp_unpack_aux(I915_READ(ch_data + i),
924 recv + i, recv_bytes - i);
a4fc5ed6 925
9ee32fea
DV
926 ret = recv_bytes;
927out:
928 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 929 intel_aux_display_runtime_put(dev_priv);
9ee32fea 930
884f19e9
JN
931 if (vdd)
932 edp_panel_vdd_off(intel_dp, false);
933
773538e8 934 pps_unlock(intel_dp);
e39b999a 935
9ee32fea 936 return ret;
a4fc5ed6
KP
937}
938
a6c8aff0
JN
939#define BARE_ADDRESS_SIZE 3
940#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
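/*
 * The 4-byte header assembled below follows the DP AUX syntax: a request
 * nibble plus a 20-bit address in txbuf[0..2], and (length - 1) in
 * txbuf[3]; an address-only transaction sends just the three address
 * bytes (BARE_ADDRESS_SIZE) with no length byte.
 */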
9d1a1031
JN
941static ssize_t
942intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 943{
9d1a1031
JN
944 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
945 uint8_t txbuf[20], rxbuf[20];
946 size_t txsize, rxsize;
a4fc5ed6 947 int ret;
a4fc5ed6 948
d2d9cbbd
VS
949 txbuf[0] = (msg->request << 4) |
950 ((msg->address >> 16) & 0xf);
951 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
952 txbuf[2] = msg->address & 0xff;
953 txbuf[3] = msg->size - 1;
46a5ae9f 954
9d1a1031
JN
955 switch (msg->request & ~DP_AUX_I2C_MOT) {
956 case DP_AUX_NATIVE_WRITE:
957 case DP_AUX_I2C_WRITE:
c1e74122 958 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
a6c8aff0 959 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 960 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 961
9d1a1031
JN
962 if (WARN_ON(txsize > 20))
963 return -E2BIG;
a4fc5ed6 964
9d1a1031 965 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 966
9d1a1031
JN
967 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
968 if (ret > 0) {
969 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 970
a1ddefd8
JN
971 if (ret > 1) {
972 /* Number of bytes written in a short write. */
973 ret = clamp_t(int, rxbuf[1], 0, msg->size);
974 } else {
975 /* Return payload size. */
976 ret = msg->size;
977 }
9d1a1031
JN
978 }
979 break;
46a5ae9f 980
9d1a1031
JN
981 case DP_AUX_NATIVE_READ:
982 case DP_AUX_I2C_READ:
a6c8aff0 983 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 984 rxsize = msg->size + 1;
a4fc5ed6 985
9d1a1031
JN
986 if (WARN_ON(rxsize > 20))
987 return -E2BIG;
a4fc5ed6 988
9d1a1031
JN
989 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
990 if (ret > 0) {
991 msg->reply = rxbuf[0] >> 4;
992 /*
993 * Assume happy day, and copy the data. The caller is
994 * expected to check msg->reply before touching it.
995 *
996 * Return payload size.
997 */
998 ret--;
999 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1000 }
9d1a1031
JN
1001 break;
1002
1003 default:
1004 ret = -EINVAL;
1005 break;
a4fc5ed6 1006 }
f51a44b9 1007
9d1a1031 1008 return ret;
a4fc5ed6
KP
1009}
1010
9d1a1031
JN
1011static void
1012intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1013{
1014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
500ea70d 1015 struct drm_i915_private *dev_priv = dev->dev_private;
33ad6626
JN
1016 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1017 enum port port = intel_dig_port->port;
500ea70d 1018 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
0b99836f 1019 const char *name = NULL;
500ea70d 1020 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
ab2c0672
DA
1021 int ret;
1022
500ea70d
RV
1023 /* On SKL we don't have Aux for port E so we rely on VBT to set
1024 * a proper alternate aux channel.
1025 */
ef11bdb3 1026 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
500ea70d
RV
1027 switch (info->alternate_aux_channel) {
1028 case DP_AUX_B:
1029 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1030 break;
1031 case DP_AUX_C:
1032 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1033 break;
1034 case DP_AUX_D:
1035 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1036 break;
1037 case DP_AUX_A:
1038 default:
1039 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1040 }
1041 }
1042
33ad6626
JN
1043 switch (port) {
1044 case PORT_A:
1045 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1046 name = "DPDDC-A";
ab2c0672 1047 break;
33ad6626
JN
1048 case PORT_B:
1049 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1050 name = "DPDDC-B";
ab2c0672 1051 break;
33ad6626
JN
1052 case PORT_C:
1053 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1054 name = "DPDDC-C";
ab2c0672 1055 break;
33ad6626
JN
1056 case PORT_D:
1057 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1058 name = "DPDDC-D";
33ad6626 1059 break;
500ea70d
RV
1060 case PORT_E:
1061 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1062 name = "DPDDC-E";
1063 break;
33ad6626
JN
1064 default:
1065 BUG();
ab2c0672
DA
1066 }
1067
1b1aad75
DL
1068 /*
1069 * The AUX_CTL register is usually DP_CTL + 0x10.
1070 *
1071 * On Haswell and Broadwell though:
1072 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1073 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1074 *
1075 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1076 */
500ea70d 1077 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
33ad6626 1078 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1079
0b99836f 1080 intel_dp->aux.name = name;
9d1a1031
JN
1081 intel_dp->aux.dev = dev->dev;
1082 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1083
0b99836f
JN
1084 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1085 connector->base.kdev->kobj.name);
8316f337 1086
4f71d0cb 1087 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1088 if (ret < 0) {
4f71d0cb 1089 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1090 name, ret);
1091 return;
ab2c0672 1092 }
8a5e6aeb 1093
0b99836f
JN
1094 ret = sysfs_create_link(&connector->base.kdev->kobj,
1095 &intel_dp->aux.ddc.dev.kobj,
1096 intel_dp->aux.ddc.dev.kobj.name);
1097 if (ret < 0) {
1098 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1099 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1100 }
a4fc5ed6
KP
1101}
1102
80f65de3
ID
1103static void
1104intel_dp_connector_unregister(struct intel_connector *intel_connector)
1105{
1106 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1107
0e32b39c
DA
1108 if (!intel_connector->mst_port)
1109 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1110 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1111 intel_connector_unregister(intel_connector);
1112}
1113
5416d871 1114static void
840b32b7 1115skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
5416d871
DL
1116{
1117 u32 ctrl1;
1118
dd3cd74a
ACO
1119 memset(&pipe_config->dpll_hw_state, 0,
1120 sizeof(pipe_config->dpll_hw_state));
1121
5416d871
DL
1122 pipe_config->ddi_pll_sel = SKL_DPLL0;
1123 pipe_config->dpll_hw_state.cfgcr1 = 0;
1124 pipe_config->dpll_hw_state.cfgcr2 = 0;
1125
1126 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
840b32b7 1127 switch (pipe_config->port_clock / 2) {
c3346ef6 1128 case 81000:
71cd8423 1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1130 SKL_DPLL0);
1131 break;
c3346ef6 1132 case 135000:
71cd8423 1133 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1134 SKL_DPLL0);
1135 break;
c3346ef6 1136 case 270000:
71cd8423 1137 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1138 SKL_DPLL0);
1139 break;
c3346ef6 1140 case 162000:
71cd8423 1141 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1142 SKL_DPLL0);
1143 break;
1144 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1145 results in a CDCLK change. The CDCLK change needs to be handled by
1146 disabling pipes and re-enabling them */
1147 case 108000:
71cd8423 1148 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1149 SKL_DPLL0);
1150 break;
1151 case 216000:
71cd8423 1152 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1153 SKL_DPLL0);
1154 break;
1155
5416d871
DL
1156 }
1157 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1158}
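/*
 * Note the division by two above: e.g. a 540000 kHz (HBR2) port clock
 * selects DPLL_CTRL1_LINK_RATE_2700 and 162000 kHz (RBR) selects
 * DPLL_CTRL1_LINK_RATE_810, i.e. the DPLL0 link rate is encoded at half
 * the DP symbol rate.
 */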
1159
6fa2d197 1160void
840b32b7 1161hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1162{
ee46f3c7
ACO
1163 memset(&pipe_config->dpll_hw_state, 0,
1164 sizeof(pipe_config->dpll_hw_state));
1165
840b32b7
VS
1166 switch (pipe_config->port_clock / 2) {
1167 case 81000:
0e50338c
DV
1168 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1169 break;
840b32b7 1170 case 135000:
0e50338c
DV
1171 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1172 break;
840b32b7 1173 case 270000:
0e50338c
DV
1174 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1175 break;
1176 }
1177}
1178
fc0f8e25 1179static int
12f6a2e2 1180intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1181{
94ca719e
VS
1182 if (intel_dp->num_sink_rates) {
1183 *sink_rates = intel_dp->sink_rates;
1184 return intel_dp->num_sink_rates;
fc0f8e25 1185 }
12f6a2e2
VS
1186
1187 *sink_rates = default_rates;
1188
1189 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1190}
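/*
 * For sinks without a DPCD rate table the count comes from the max link
 * bw code: DP_LINK_BW_1_62 (0x06) >> 3 == 0, 0x0a >> 3 == 1, 0x14 >> 3 == 2,
 * so the "+ 1" yields how many entries of default_rates apply.
 */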
1191
e588fa18 1192bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1193{
e588fa18
ACO
1194 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1195 struct drm_device *dev = dig_port->base.base.dev;
1196
ed63baaf 1197 /* WaDisableHBR2:skl */
e87a005d 1198 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1199 return false;
1200
1201 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1202 (INTEL_INFO(dev)->gen >= 9))
1203 return true;
1204 else
1205 return false;
1206}
1207
a8f3ef61 1208static int
e588fa18 1209intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
a8f3ef61 1210{
e588fa18
ACO
1211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1212 struct drm_device *dev = dig_port->base.base.dev;
af7080f5
TS
1213 int size;
1214
64987fc5
SJ
1215 if (IS_BROXTON(dev)) {
1216 *source_rates = bxt_rates;
af7080f5 1217 size = ARRAY_SIZE(bxt_rates);
ef11bdb3 1218 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
637a9c63 1219 *source_rates = skl_rates;
af7080f5
TS
1220 size = ARRAY_SIZE(skl_rates);
1221 } else {
1222 *source_rates = default_rates;
1223 size = ARRAY_SIZE(default_rates);
a8f3ef61 1224 }
636280ba 1225
ed63baaf 1226 /* This depends on the fact that 5.4 is the last value in the array */
e588fa18 1227 if (!intel_dp_source_supports_hbr2(intel_dp))
af7080f5 1228 size--;
636280ba 1229
af7080f5 1230 return size;
a8f3ef61
SJ
1231}
1232
c6bb3538
DV
1233static void
1234intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1235 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1236{
1237 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1238 const struct dp_link_dpll *divisor = NULL;
1239 int i, count = 0;
c6bb3538
DV
1240
1241 if (IS_G4X(dev)) {
9dd4ffdf
CML
1242 divisor = gen4_dpll;
1243 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1244 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1245 divisor = pch_dpll;
1246 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1247 } else if (IS_CHERRYVIEW(dev)) {
1248 divisor = chv_dpll;
1249 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1250 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1251 divisor = vlv_dpll;
1252 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1253 }
9dd4ffdf
CML
1254
1255 if (divisor && count) {
1256 for (i = 0; i < count; i++) {
840b32b7 1257 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1258 pipe_config->dpll = divisor[i].dpll;
1259 pipe_config->clock_set = true;
1260 break;
1261 }
1262 }
c6bb3538
DV
1263 }
1264}
1265
2ecae76a
VS
1266static int intersect_rates(const int *source_rates, int source_len,
1267 const int *sink_rates, int sink_len,
94ca719e 1268 int *common_rates)
a8f3ef61
SJ
1269{
1270 int i = 0, j = 0, k = 0;
1271
a8f3ef61
SJ
1272 while (i < source_len && j < sink_len) {
1273 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1274 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1275 return k;
94ca719e 1276 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1277 ++k;
1278 ++i;
1279 ++j;
1280 } else if (source_rates[i] < sink_rates[j]) {
1281 ++i;
1282 } else {
1283 ++j;
1284 }
1285 }
1286 return k;
1287}
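/*
 * This is a plain two-pointer merge intersection; both rate arrays are
 * assumed to be sorted ascending. e.g. { 162000, 270000, 540000 } and
 * { 162000, 216000, 270000 } intersect to { 162000, 270000 }, k == 2.
 */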
1288
94ca719e
VS
1289static int intel_dp_common_rates(struct intel_dp *intel_dp,
1290 int *common_rates)
2ecae76a 1291{
2ecae76a
VS
1292 const int *source_rates, *sink_rates;
1293 int source_len, sink_len;
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
e588fa18 1296 source_len = intel_dp_source_rates(intel_dp, &source_rates);
2ecae76a
VS
1297
1298 return intersect_rates(source_rates, source_len,
1299 sink_rates, sink_len,
94ca719e 1300 common_rates);
2ecae76a
VS
1301}
1302
0336400e
VS
1303static void snprintf_int_array(char *str, size_t len,
1304 const int *array, int nelem)
1305{
1306 int i;
1307
1308 str[0] = '\0';
1309
1310 for (i = 0; i < nelem; i++) {
b2f505be 1311 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1312 if (r >= len)
1313 return;
1314 str += r;
1315 len -= r;
1316 }
1317}
1318
1319static void intel_dp_print_rates(struct intel_dp *intel_dp)
1320{
0336400e 1321 const int *source_rates, *sink_rates;
94ca719e
VS
1322 int source_len, sink_len, common_len;
1323 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1324 char str[128]; /* FIXME: too big for stack? */
1325
1326 if ((drm_debug & DRM_UT_KMS) == 0)
1327 return;
1328
e588fa18 1329 source_len = intel_dp_source_rates(intel_dp, &source_rates);
0336400e
VS
1330 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1331 DRM_DEBUG_KMS("source rates: %s\n", str);
1332
1333 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1334 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1335 DRM_DEBUG_KMS("sink rates: %s\n", str);
1336
94ca719e
VS
1337 common_len = intel_dp_common_rates(intel_dp, common_rates);
1338 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1339 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1340}
1341
f4896f15 1342static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1343{
1344 int i = 0;
1345
1346 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1347 if (find == rates[i])
1348 break;
1349
1350 return i;
1351}
1352
50fec21a
VS
1353int
1354intel_dp_max_link_rate(struct intel_dp *intel_dp)
1355{
1356 int rates[DP_MAX_SUPPORTED_RATES] = {};
1357 int len;
1358
94ca719e 1359 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1360 if (WARN_ON(len <= 0))
1361 return 162000;
1362
1363 return rates[rate_to_index(0, rates) - 1];
1364}
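/*
 * common_rates[] is zero-initialized and filled in ascending order, so
 * rate_to_index(0, rates) returns the number of valid entries and the
 * expression above picks the last, i.e. highest, common rate.
 */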
1365
ed4e9c1d
VS
1366int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1367{
94ca719e 1368 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1369}
1370
94223d04
ACO
1371void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1372 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1373{
1374 if (intel_dp->num_sink_rates) {
1375 *link_bw = 0;
1376 *rate_select =
1377 intel_dp_rate_select(intel_dp, port_clock);
1378 } else {
1379 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1380 *rate_select = 0;
1381 }
1382}
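/*
 * Sinks that expose a DPCD rate table (eDP 1.4) are addressed by index
 * via *rate_select with *link_bw forced to 0; legacy sinks get the
 * classic bw code instead, e.g. 270000 kHz maps to DP_LINK_BW_2_7 (0x0a).
 */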
1383
00c09d70 1384bool
5bfe2ac0 1385intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1386 struct intel_crtc_state *pipe_config)
a4fc5ed6 1387{
5bfe2ac0 1388 struct drm_device *dev = encoder->base.dev;
36008365 1389 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1390 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1391 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1392 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1393 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1394 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1395 int lane_count, clock;
56071a20 1396 int min_lane_count = 1;
eeb6324d 1397 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1398 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1399 int min_clock = 0;
a8f3ef61 1400 int max_clock;
083f9560 1401 int bpp, mode_rate;
ff9a6750 1402 int link_avail, link_clock;
94ca719e
VS
1403 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1404 int common_len;
04a60f9f 1405 uint8_t link_bw, rate_select;
a8f3ef61 1406
94ca719e 1407 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1408
1409 /* No common link rates between source and sink */
94ca719e 1410 WARN_ON(common_len <= 0);
a8f3ef61 1411
94ca719e 1412 max_clock = common_len - 1;
a4fc5ed6 1413
bc7d38a4 1414 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1415 pipe_config->has_pch_encoder = true;
1416
03afc4a2 1417 pipe_config->has_dp_encoder = true;
f769cd24 1418 pipe_config->has_drrs = false;
9fcb1704 1419 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1420
dd06f90e
JN
1421 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1422 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1423 adjusted_mode);
a1b2278e
CK
1424
1425 if (INTEL_INFO(dev)->gen >= 9) {
1426 int ret;
e435d6e5 1427 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1428 if (ret)
1429 return ret;
1430 }
1431
b5667627 1432 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1433 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1434 intel_connector->panel.fitting_mode);
1435 else
b074cec8
JB
1436 intel_pch_panel_fitting(intel_crtc, pipe_config,
1437 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1438 }
1439
cb1793ce 1440 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1441 return false;
1442
083f9560 1443 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1444 "max bw %d pixel clock %iKHz\n",
94ca719e 1445 max_lane_count, common_rates[max_clock],
241bfc38 1446 adjusted_mode->crtc_clock);
083f9560 1447
36008365
DV
1448 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1449 * bpc in between. */
3e7ca985 1450 bpp = pipe_config->pipe_bpp;
56071a20 1451 if (is_edp(intel_dp)) {
22ce5628
TS
1452
1453 /* Get bpp from vbt only for panels that dont have bpp in edid */
1454 if (intel_connector->base.display_info.bpc == 0 &&
1455 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1456 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1457 dev_priv->vbt.edp_bpp);
1458 bpp = dev_priv->vbt.edp_bpp;
1459 }
1460
344c5bbc
JN
1461 /*
1462 * Use the maximum clock and number of lanes the eDP panel
1463 * advertises being capable of. The panels are generally
1464 * designed to support only a single clock and lane
1465 * configuration, and typically these values correspond to the
1466 * native resolution of the panel.
1467 */
1468 min_lane_count = max_lane_count;
1469 min_clock = max_clock;
7984211e 1470 }
657445fe 1471
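	/*
	 * Search order below: bpp walks downward from the pipe bpp in steps
	 * of 2 bits per component, and for each bpp the first (clock, lane
	 * count) combination that fits wins, scanning clocks from lowest and
	 * lane counts from fewest. In other words, image quality is kept as
	 * high as possible and then the cheapest link that still fits is used.
	 */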
36008365 1472 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1473 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1474 bpp);
36008365 1475
c6930992 1476 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1477 for (lane_count = min_lane_count;
1478 lane_count <= max_lane_count;
1479 lane_count <<= 1) {
1480
94ca719e 1481 link_clock = common_rates[clock];
36008365
DV
1482 link_avail = intel_dp_max_data_rate(link_clock,
1483 lane_count);
1484
1485 if (mode_rate <= link_avail) {
1486 goto found;
1487 }
1488 }
1489 }
1490 }
c4867936 1491
36008365 1492 return false;
3685a8f3 1493
36008365 1494found:
55bc60db
VS
1495 if (intel_dp->color_range_auto) {
1496 /*
1497 * See:
1498 * CEA-861-E - 5.1 Default Encoding Parameters
1499 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1500 */
0f2a2a75
VS
1501 pipe_config->limited_color_range =
1502 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1503 } else {
1504 pipe_config->limited_color_range =
1505 intel_dp->limited_color_range;
55bc60db
VS
1506 }
1507
90a6b7b0 1508 pipe_config->lane_count = lane_count;
a8f3ef61 1509
657445fe 1510 pipe_config->pipe_bpp = bpp;
94ca719e 1511 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1512
04a60f9f
VS
1513 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1514 &link_bw, &rate_select);
1515
1516 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1517 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1518 pipe_config->port_clock, bpp);
36008365
DV
1519 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1520 mode_rate, link_avail);
a4fc5ed6 1521
03afc4a2 1522 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1523 adjusted_mode->crtc_clock,
1524 pipe_config->port_clock,
03afc4a2 1525 &pipe_config->dp_m_n);
9d1a455b 1526
439d7ac0 1527 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1528 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1529 pipe_config->has_drrs = true;
439d7ac0
PB
1530 intel_link_compute_m_n(bpp, lane_count,
1531 intel_connector->panel.downclock_mode->clock,
1532 pipe_config->port_clock,
1533 &pipe_config->dp_m2_n2);
1534 }
1535
ef11bdb3 1536 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1537 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1538 else if (IS_BROXTON(dev))
1539 /* handled in ddi */;
5416d871 1540 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1541 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1542 else
840b32b7 1543 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1544
03afc4a2 1545 return true;
a4fc5ed6
KP
1546}
1547
7c62a164 1548static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1549{
7c62a164
DV
1550 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1551 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1552 struct drm_device *dev = crtc->base.dev;
ea9b6006 1553 struct drm_i915_private *dev_priv = dev->dev_private;
ea9b6006 1554
6e3c9717
ACO
1555 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1556 crtc->config->port_clock);
ea9b6006 1557
6fec7662
VS
1558 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
1559
1560 if (crtc->config->port_clock == 162000)
b377e0df 1561 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
6fec7662 1562 else
7c62a164 1563 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1564
6fec7662 1565 I915_WRITE(DP_A, intel_dp->DP);
ea9b6006
DV
1566 POSTING_READ(DP_A);
1567 udelay(500);
1568}
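/*
 * Per the commit subject, the eDP PLL frequency bits are tracked in the
 * cached intel_dp->DP value here rather than in a value re-read from the
 * register, so later writes of intel_dp->DP keep the PLL frequency
 * selection intact.
 */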
1569
901c2daf
VS
1570void intel_dp_set_link_params(struct intel_dp *intel_dp,
1571 const struct intel_crtc_state *pipe_config)
1572{
1573 intel_dp->link_rate = pipe_config->port_clock;
1574 intel_dp->lane_count = pipe_config->lane_count;
1575}
1576
8ac33ed3 1577static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1578{
b934223d 1579 struct drm_device *dev = encoder->base.dev;
417e822d 1580 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1581 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1582 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1583 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1584 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1585
901c2daf
VS
1586 intel_dp_set_link_params(intel_dp, crtc->config);
1587
417e822d 1588 /*
1a2eb460 1589 * There are four kinds of DP registers:
417e822d
KP
1590 *
1591 * IBX PCH
1a2eb460
KP
1592 * SNB CPU
1593 * IVB CPU
417e822d
KP
1594 * CPT PCH
1595 *
1596 * IBX PCH and CPU are the same for almost everything,
1597 * except that the CPU DP PLL is configured in this
1598 * register
1599 *
1600 * CPT PCH is quite different, having many bits moved
1601 * to the TRANS_DP_CTL register instead. That
1602 * configuration happens (oddly) in ironlake_pch_enable
1603 */
9c9e7927 1604
417e822d
KP
1605 /* Preserve the BIOS-computed detected bit. This is
1606 * supposed to be read-only.
1607 */
1608 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1609
417e822d 1610 /* Handle DP bits in common between all three register formats */
417e822d 1611 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1612 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1613
417e822d 1614 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1615
39e5fa88 1616 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1617 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1618 intel_dp->DP |= DP_SYNC_HS_HIGH;
1619 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1620 intel_dp->DP |= DP_SYNC_VS_HIGH;
1621 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1622
6aba5b6c 1623 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1624 intel_dp->DP |= DP_ENHANCED_FRAMING;
1625
7c62a164 1626 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1627 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1628 u32 trans_dp;
1629
39e5fa88 1630 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1631
1632 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1633 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1634 trans_dp |= TRANS_DP_ENH_FRAMING;
1635 else
1636 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1637 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1638 } else {
0f2a2a75
VS
1639 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1640 crtc->config->limited_color_range)
1641 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1642
1643 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1644 intel_dp->DP |= DP_SYNC_HS_HIGH;
1645 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1646 intel_dp->DP |= DP_SYNC_VS_HIGH;
1647 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1648
6aba5b6c 1649 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1650 intel_dp->DP |= DP_ENHANCED_FRAMING;
1651
39e5fa88 1652 if (IS_CHERRYVIEW(dev))
44f37d1f 1653 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1654 else if (crtc->pipe == PIPE_B)
1655 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1656 }
a4fc5ed6
KP
1657}
1658
ffd6749d
PZ
1659#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1660#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1661
1a5ef5b7
PZ
1662#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1663#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1664
ffd6749d
PZ
1665#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1666#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1667
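/*
 * Each MASK/VALUE pair above is handed to wait_panel_status() below, which
 * polls PP_STATUS until (status & mask) == value or a 5 second timeout
 * expires.
 */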
4be73780 1668static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1669 u32 mask,
1670 u32 value)
bd943159 1671{
30add22d 1672 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1673 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1674 u32 pp_stat_reg, pp_ctrl_reg;
1675
e39b999a
VS
1676 lockdep_assert_held(&dev_priv->pps_mutex);
1677
bf13e81b
JN
1678 pp_stat_reg = _pp_stat_reg(intel_dp);
1679 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1680
99ea7127 1681 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1682 mask, value,
1683 I915_READ(pp_stat_reg),
1684 I915_READ(pp_ctrl_reg));
32ce697c 1685
453c5420 1686 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1687 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1688 I915_READ(pp_stat_reg),
1689 I915_READ(pp_ctrl_reg));
32ce697c 1690 }
54c136d4
CW
1691
1692 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1693}
32ce697c 1694
4be73780 1695static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1696{
1697 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1698 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1699}
1700
4be73780 1701static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1702{
1703 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1704 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1705}
1706
4be73780 1707static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1708{
1709 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1710
1711 /* When we disable the VDD override bit last, we have to do the
1712 * manual wait. */
1713 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1714 intel_dp->panel_power_cycle_delay);
1715
4be73780 1716 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1717}
1718
4be73780 1719static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1720{
1721 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1722 intel_dp->backlight_on_delay);
1723}
1724
4be73780 1725static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1726{
1727 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1728 intel_dp->backlight_off_delay);
1729}
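/*
 * The three wait helpers above rely on wait_remaining_ms_from_jiffies(), so
 * they only sleep for whatever part of the configured delay has not already
 * elapsed since the recorded timestamp; a delay that has already passed in
 * real time costs nothing here.
 */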
99ea7127 1730
832dd3c1
KP
1731/* Read the current pp_control value, unlocking the register if it
1732 * is locked
1733 */
1734
453c5420 1735static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1736{
453c5420
JB
1737 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1738 struct drm_i915_private *dev_priv = dev->dev_private;
1739 u32 control;
832dd3c1 1740
e39b999a
VS
1741 lockdep_assert_held(&dev_priv->pps_mutex);
1742
bf13e81b 1743 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1744 if (!IS_BROXTON(dev)) {
1745 control &= ~PANEL_UNLOCK_MASK;
1746 control |= PANEL_UNLOCK_REGS;
1747 }
832dd3c1 1748 return control;
bd943159
KP
1749}
1750
951468f3
VS
1751/*
1752 * Must be paired with edp_panel_vdd_off().
1753 * Must hold pps_mutex around the whole on/off sequence.
1754 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1755 */
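/*
 * For reference, these low-level helpers are used in a sequence of the
 * following shape (this mirrors intel_enable_dp() further below):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */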
1e0560e0 1756static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1757{
30add22d 1758 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1760 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1761 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1762 enum intel_display_power_domain power_domain;
5d613501 1763 u32 pp;
453c5420 1764 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1765 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1766
e39b999a
VS
1767 lockdep_assert_held(&dev_priv->pps_mutex);
1768
97af61f5 1769 if (!is_edp(intel_dp))
adddaaf4 1770 return false;
bd943159 1771
2c623c11 1772 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1773 intel_dp->want_panel_vdd = true;
99ea7127 1774
4be73780 1775 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1776 return need_to_disable;
b0665d57 1777
4e6e1a54
ID
1778 power_domain = intel_display_port_power_domain(intel_encoder);
1779 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1780
3936fcf4
VS
1781 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1782 port_name(intel_dig_port->port));
bd943159 1783
4be73780
DV
1784 if (!edp_have_panel_power(intel_dp))
1785 wait_panel_power_cycle(intel_dp);
99ea7127 1786
453c5420 1787 pp = ironlake_get_pp_control(intel_dp);
5d613501 1788 pp |= EDP_FORCE_VDD;
ebf33b18 1789
bf13e81b
JN
1790 pp_stat_reg = _pp_stat_reg(intel_dp);
1791 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1792
1793 I915_WRITE(pp_ctrl_reg, pp);
1794 POSTING_READ(pp_ctrl_reg);
1795 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1796 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1797 /*
1798 * If the panel wasn't on, delay before accessing aux channel
1799 */
4be73780 1800 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1801 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1802 port_name(intel_dig_port->port));
f01eca2e 1803 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1804 }
adddaaf4
JN
1805
1806 return need_to_disable;
1807}
1808
951468f3
VS
1809/*
1810 * Must be paired with intel_edp_panel_vdd_off() or
1811 * intel_edp_panel_off().
1812 * Nested calls to these functions are not allowed since
1813 * we drop the lock. Caller must use some higher level
1814 * locking to prevent nested calls from other threads.
1815 */
b80d6c78 1816void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1817{
c695b6b6 1818 bool vdd;
adddaaf4 1819
c695b6b6
VS
1820 if (!is_edp(intel_dp))
1821 return;
1822
773538e8 1823 pps_lock(intel_dp);
c695b6b6 1824 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1825 pps_unlock(intel_dp);
c695b6b6 1826
e2c719b7 1827 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1828 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1829}
1830
4be73780 1831static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1832{
30add22d 1833 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1834 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1835 struct intel_digital_port *intel_dig_port =
1836 dp_to_dig_port(intel_dp);
1837 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1838 enum intel_display_power_domain power_domain;
5d613501 1839 u32 pp;
453c5420 1840 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1841
e39b999a 1842 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1843
15e899a0 1844 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1845
15e899a0 1846 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1847 return;
b0665d57 1848
3936fcf4
VS
1849 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1850 port_name(intel_dig_port->port));
bd943159 1851
be2c9196
VS
1852 pp = ironlake_get_pp_control(intel_dp);
1853 pp &= ~EDP_FORCE_VDD;
453c5420 1854
be2c9196
VS
1855 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1856 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1857
be2c9196
VS
1858 I915_WRITE(pp_ctrl_reg, pp);
1859 POSTING_READ(pp_ctrl_reg);
90791a5c 1860
be2c9196
VS
1861 /* Make sure sequencer is idle before allowing subsequent activity */
1862 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1863 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1864
be2c9196
VS
1865 if ((pp & POWER_TARGET_ON) == 0)
1866 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1867
be2c9196
VS
1868 power_domain = intel_display_port_power_domain(intel_encoder);
1869 intel_display_power_put(dev_priv, power_domain);
bd943159 1870}
5d613501 1871
4be73780 1872static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1873{
1874 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1875 struct intel_dp, panel_vdd_work);
bd943159 1876
773538e8 1877 pps_lock(intel_dp);
15e899a0
VS
1878 if (!intel_dp->want_panel_vdd)
1879 edp_panel_vdd_off_sync(intel_dp);
773538e8 1880 pps_unlock(intel_dp);
bd943159
KP
1881}
1882
aba86890
ID
1883static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1884{
1885 unsigned long delay;
1886
1887 /*
1888 * Queue the timer to fire a long time from now (relative to the power
1889 * down delay) to keep the panel power up across a sequence of
1890 * operations.
1891 */
1892 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1893 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1894}
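/*
 * As an illustration: with a panel_power_cycle_delay of 500ms (an example
 * value only; the real one comes from the VBT/spec defaults) the work above
 * is scheduled roughly 2.5 seconds out.
 */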
1895
951468f3
VS
1896/*
1897 * Must be paired with edp_panel_vdd_on().
1898 * Must hold pps_mutex around the whole on/off sequence.
1899 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1900 */
4be73780 1901static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1902{
e39b999a
VS
1903 struct drm_i915_private *dev_priv =
1904 intel_dp_to_dev(intel_dp)->dev_private;
1905
1906 lockdep_assert_held(&dev_priv->pps_mutex);
1907
97af61f5
KP
1908 if (!is_edp(intel_dp))
1909 return;
5d613501 1910
e2c719b7 1911 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1912 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1913
bd943159
KP
1914 intel_dp->want_panel_vdd = false;
1915
aba86890 1916 if (sync)
4be73780 1917 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1918 else
1919 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1920}
1921
9f0fb5be 1922static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1923{
30add22d 1924 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1925 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1926 u32 pp;
453c5420 1927 u32 pp_ctrl_reg;
9934c132 1928
9f0fb5be
VS
1929 lockdep_assert_held(&dev_priv->pps_mutex);
1930
97af61f5 1931 if (!is_edp(intel_dp))
bd943159 1932 return;
99ea7127 1933
3936fcf4
VS
1934 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1935 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1936
e7a89ace
VS
1937 if (WARN(edp_have_panel_power(intel_dp),
1938 "eDP port %c panel power already on\n",
1939 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1940 return;
9934c132 1941
4be73780 1942 wait_panel_power_cycle(intel_dp);
37c6c9b0 1943
bf13e81b 1944 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1945 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1946 if (IS_GEN5(dev)) {
1947 /* ILK workaround: disable reset around power sequence */
1948 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1949 I915_WRITE(pp_ctrl_reg, pp);
1950 POSTING_READ(pp_ctrl_reg);
05ce1a49 1951 }
37c6c9b0 1952
1c0ae80a 1953 pp |= POWER_TARGET_ON;
99ea7127
KP
1954 if (!IS_GEN5(dev))
1955 pp |= PANEL_POWER_RESET;
1956
453c5420
JB
1957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
9934c132 1959
4be73780 1960 wait_panel_on(intel_dp);
dce56b3c 1961 intel_dp->last_power_on = jiffies;
9934c132 1962
05ce1a49
KP
1963 if (IS_GEN5(dev)) {
1964 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
05ce1a49 1967 }
9f0fb5be 1968}
e39b999a 1969
9f0fb5be
VS
1970void intel_edp_panel_on(struct intel_dp *intel_dp)
1971{
1972 if (!is_edp(intel_dp))
1973 return;
1974
1975 pps_lock(intel_dp);
1976 edp_panel_on(intel_dp);
773538e8 1977 pps_unlock(intel_dp);
9934c132
JB
1978}
1979
9f0fb5be
VS
1980
1981static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1982{
4e6e1a54
ID
1983 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1984 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1985 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1986 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1987 enum intel_display_power_domain power_domain;
99ea7127 1988 u32 pp;
453c5420 1989 u32 pp_ctrl_reg;
9934c132 1990
9f0fb5be
VS
1991 lockdep_assert_held(&dev_priv->pps_mutex);
1992
97af61f5
KP
1993 if (!is_edp(intel_dp))
1994 return;
37c6c9b0 1995
3936fcf4
VS
1996 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1997 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1998
3936fcf4
VS
1999 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2000 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2001
453c5420 2002 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2003 /* We need to switch off panel power _and_ force vdd, as otherwise some
2004 * panels get very unhappy and cease to work. */
b3064154
PJ
2005 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2006 EDP_BLC_ENABLE);
453c5420 2007
bf13e81b 2008 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2009
849e39f5
PZ
2010 intel_dp->want_panel_vdd = false;
2011
453c5420
JB
2012 I915_WRITE(pp_ctrl_reg, pp);
2013 POSTING_READ(pp_ctrl_reg);
9934c132 2014
dce56b3c 2015 intel_dp->last_power_cycle = jiffies;
4be73780 2016 wait_panel_off(intel_dp);
849e39f5
PZ
2017
2018 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2019 power_domain = intel_display_port_power_domain(intel_encoder);
2020 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2021}
e39b999a 2022
9f0fb5be
VS
2023void intel_edp_panel_off(struct intel_dp *intel_dp)
2024{
2025 if (!is_edp(intel_dp))
2026 return;
e39b999a 2027
9f0fb5be
VS
2028 pps_lock(intel_dp);
2029 edp_panel_off(intel_dp);
773538e8 2030 pps_unlock(intel_dp);
9934c132
JB
2031}
2032
1250d107
JN
2033/* Enable backlight in the panel power control. */
2034static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2035{
da63a9f2
PZ
2036 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2037 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 u32 pp;
453c5420 2040 u32 pp_ctrl_reg;
32f9d658 2041
01cb9ea6
JB
2042 /*
2043 * If we enable the backlight right away following a panel power
2044 * on, we may see slight flicker as the panel syncs with the eDP
2045 * link. So delay a bit to make sure the image is solid before
2046 * allowing it to appear.
2047 */
4be73780 2048 wait_backlight_on(intel_dp);
e39b999a 2049
773538e8 2050 pps_lock(intel_dp);
e39b999a 2051
453c5420 2052 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2053 pp |= EDP_BLC_ENABLE;
453c5420 2054
bf13e81b 2055 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2056
2057 I915_WRITE(pp_ctrl_reg, pp);
2058 POSTING_READ(pp_ctrl_reg);
e39b999a 2059
773538e8 2060 pps_unlock(intel_dp);
32f9d658
ZW
2061}
2062
1250d107
JN
2063/* Enable backlight PWM and backlight PP control. */
2064void intel_edp_backlight_on(struct intel_dp *intel_dp)
2065{
2066 if (!is_edp(intel_dp))
2067 return;
2068
2069 DRM_DEBUG_KMS("\n");
2070
2071 intel_panel_enable_backlight(intel_dp->attached_connector);
2072 _intel_edp_backlight_on(intel_dp);
2073}
2074
2075/* Disable backlight in the panel power control. */
2076static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2077{
30add22d 2078 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2079 struct drm_i915_private *dev_priv = dev->dev_private;
2080 u32 pp;
453c5420 2081 u32 pp_ctrl_reg;
32f9d658 2082
f01eca2e
KP
2083 if (!is_edp(intel_dp))
2084 return;
2085
773538e8 2086 pps_lock(intel_dp);
e39b999a 2087
453c5420 2088 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2089 pp &= ~EDP_BLC_ENABLE;
453c5420 2090
bf13e81b 2091 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2092
2093 I915_WRITE(pp_ctrl_reg, pp);
2094 POSTING_READ(pp_ctrl_reg);
f7d2323c 2095
773538e8 2096 pps_unlock(intel_dp);
e39b999a
VS
2097
2098 intel_dp->last_backlight_off = jiffies;
f7d2323c 2099 edp_wait_backlight_off(intel_dp);
1250d107 2100}
f7d2323c 2101
1250d107
JN
2102/* Disable backlight PP control and backlight PWM. */
2103void intel_edp_backlight_off(struct intel_dp *intel_dp)
2104{
2105 if (!is_edp(intel_dp))
2106 return;
2107
2108 DRM_DEBUG_KMS("\n");
f7d2323c 2109
1250d107 2110 _intel_edp_backlight_off(intel_dp);
f7d2323c 2111 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2112}
a4fc5ed6 2113
73580fb7
JN
2114/*
2115 * Hook for controlling the panel power control backlight through the bl_power
2116 * sysfs attribute. Take care to handle multiple calls.
2117 */
2118static void intel_edp_backlight_power(struct intel_connector *connector,
2119 bool enable)
2120{
2121 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2122 bool is_enabled;
2123
773538e8 2124 pps_lock(intel_dp);
e39b999a 2125 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2126 pps_unlock(intel_dp);
73580fb7
JN
2127
2128 if (is_enabled == enable)
2129 return;
2130
23ba9373
JN
2131 DRM_DEBUG_KMS("panel power control backlight %s\n",
2132 enable ? "enable" : "disable");
73580fb7
JN
2133
2134 if (enable)
2135 _intel_edp_backlight_on(intel_dp);
2136 else
2137 _intel_edp_backlight_off(intel_dp);
2138}
2139
64e1077a
VS
2140static const char *state_string(bool enabled)
2141{
2142 return enabled ? "on" : "off";
2143}
2144
2145static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2146{
2147 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2148 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2149 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2150
2151 I915_STATE_WARN(cur_state != state,
2152 "DP port %c state assertion failure (expected %s, current %s)\n",
2153 port_name(dig_port->port),
2154 state_string(state), state_string(cur_state));
2155}
2156#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2157
2158static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2159{
2160 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2161
2162 I915_STATE_WARN(cur_state != state,
2163 "eDP PLL state assertion failure (expected %s, current %s)\n",
2164 state_string(state), state_string(cur_state));
2165}
2166#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2167#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2168
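/*
 * ironlake_edp_pll_on/off() below use these checks together with
 * assert_pipe_disabled() to verify that the eDP PLL is only toggled while
 * both the pipe and the DP port are still disabled.
 */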
2bd2ad64 2169static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2170{
da63a9f2 2171 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2172 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2173 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2174
64e1077a
VS
2175 assert_pipe_disabled(dev_priv, crtc->pipe);
2176 assert_dp_port_disabled(intel_dp);
2177 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2178
d240f20f 2179 DRM_DEBUG_KMS("\n");
0767935e 2180 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2181
0767935e 2182 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2183 POSTING_READ(DP_A);
2184 udelay(200);
d240f20f
JB
2185}
2186
2bd2ad64 2187static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2188{
da63a9f2 2189 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2190 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2191 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2192
64e1077a
VS
2193 assert_pipe_disabled(dev_priv, crtc->pipe);
2194 assert_dp_port_disabled(intel_dp);
2195 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2196
6fec7662 2197 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2198
6fec7662 2199 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2200 POSTING_READ(DP_A);
d240f20f
JB
2201 udelay(200);
2202}
2203
c7ad3810 2204/* If the sink supports it, try to set the power state appropriately */
c19b0669 2205void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2206{
2207 int ret, i;
2208
2209 /* Should have a valid DPCD by this point */
2210 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2211 return;
2212
2213 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2214 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2215 DP_SET_POWER_D3);
c7ad3810
JB
2216 } else {
2217 /*
2218 * When turning on, we need to retry for 1ms to give the sink
2219 * time to wake up.
2220 */
2221 for (i = 0; i < 3; i++) {
9d1a1031
JN
2222 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2223 DP_SET_POWER_D0);
c7ad3810
JB
2224 if (ret == 1)
2225 break;
2226 msleep(1);
2227 }
2228 }
f9cac721
JN
2229
2230 if (ret != 1)
2231 DRM_DEBUG_KMS("failed to %s sink power state\n",
2232 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2233}
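/*
 * In this file the sink is put into D3 from intel_disable_dp() before the
 * panel is powered off, and back into D0 from intel_enable_dp() right
 * before link training starts.
 */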
2234
19d8fe15
DV
2235static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2236 enum pipe *pipe)
d240f20f 2237{
19d8fe15 2238 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2239 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2240 struct drm_device *dev = encoder->base.dev;
2241 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2242 enum intel_display_power_domain power_domain;
2243 u32 tmp;
2244
2245 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2246 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2247 return false;
2248
2249 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2250
2251 if (!(tmp & DP_PORT_EN))
2252 return false;
2253
39e5fa88 2254 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2255 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2256 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2257 enum pipe p;
19d8fe15 2258
adc289d7
VS
2259 for_each_pipe(dev_priv, p) {
2260 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2261 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2262 *pipe = p;
19d8fe15
DV
2263 return true;
2264 }
2265 }
19d8fe15 2266
4a0833ec
DV
2267 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2268 intel_dp->output_reg);
39e5fa88
VS
2269 } else if (IS_CHERRYVIEW(dev)) {
2270 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2271 } else {
2272 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2273 }
d240f20f 2274
19d8fe15
DV
2275 return true;
2276}
d240f20f 2277
045ac3b5 2278static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2279 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2280{
2281 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2282 u32 tmp, flags = 0;
63000ef6
XZ
2283 struct drm_device *dev = encoder->base.dev;
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 enum port port = dp_to_dig_port(intel_dp)->port;
2286 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2287 int dotclock;
045ac3b5 2288
9ed109a7 2289 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2290
2291 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2292
39e5fa88 2293 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2294 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2295
2296 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2297 flags |= DRM_MODE_FLAG_PHSYNC;
2298 else
2299 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2300
b81e34c2 2301 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2302 flags |= DRM_MODE_FLAG_PVSYNC;
2303 else
2304 flags |= DRM_MODE_FLAG_NVSYNC;
2305 } else {
39e5fa88 2306 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2307 flags |= DRM_MODE_FLAG_PHSYNC;
2308 else
2309 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2310
39e5fa88 2311 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2312 flags |= DRM_MODE_FLAG_PVSYNC;
2313 else
2314 flags |= DRM_MODE_FLAG_NVSYNC;
2315 }
045ac3b5 2316
2d112de7 2317 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2318
8c875fca
VS
2319 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2320 tmp & DP_COLOR_RANGE_16_235)
2321 pipe_config->limited_color_range = true;
2322
eb14cb74
VS
2323 pipe_config->has_dp_encoder = true;
2324
90a6b7b0
VS
2325 pipe_config->lane_count =
2326 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2327
eb14cb74
VS
2328 intel_dp_get_m_n(crtc, pipe_config);
2329
18442d08 2330 if (port == PORT_A) {
b377e0df 2331 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2332 pipe_config->port_clock = 162000;
2333 else
2334 pipe_config->port_clock = 270000;
2335 }
18442d08
VS
2336
2337 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2338 &pipe_config->dp_m_n);
2339
2340 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2341 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2342
2d112de7 2343 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2344
c6cd2ee2
JN
2345 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2346 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2347 /*
2348 * This is a big fat ugly hack.
2349 *
2350 * Some machines in UEFI boot mode provide us a VBT that has 18
2351 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2352 * unknown we fail to light up. Yet the same BIOS boots up with
2353 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2354 * max, not what it tells us to use.
2355 *
2356 * Note: This will still be broken if the eDP panel is not lit
2357 * up by the BIOS, and thus we can't get the mode at module
2358 * load.
2359 */
2360 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2361 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2362 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2363 }
045ac3b5
JB
2364}
2365
e8cb4558 2366static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2367{
e8cb4558 2368 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2369 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2370 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2371
6e3c9717 2372 if (crtc->config->has_audio)
495a5bb8 2373 intel_audio_codec_disable(encoder);
6cb49835 2374
b32c6f48
RV
2375 if (HAS_PSR(dev) && !HAS_DDI(dev))
2376 intel_psr_disable(intel_dp);
2377
6cb49835
DV
2378 /* Make sure the panel is off before trying to change the mode. But also
2379 * ensure that we have vdd while we switch off the panel. */
24f3e092 2380 intel_edp_panel_vdd_on(intel_dp);
4be73780 2381 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2382 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2383 intel_edp_panel_off(intel_dp);
3739850b 2384
08aff3fe
VS
2385 /* disable the port before the pipe on g4x */
2386 if (INTEL_INFO(dev)->gen < 5)
3739850b 2387 intel_dp_link_down(intel_dp);
d240f20f
JB
2388}
2389
08aff3fe 2390static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2391{
2bd2ad64 2392 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2393 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2394
49277c31 2395 intel_dp_link_down(intel_dp);
08aff3fe
VS
2396 if (port == PORT_A)
2397 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2398}
2399
2400static void vlv_post_disable_dp(struct intel_encoder *encoder)
2401{
2402 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2403
2404 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2405}
2406
a8f327fb
VS
2407static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2408 bool reset)
580d3811 2409{
a8f327fb
VS
2410 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2411 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2412 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2413 enum pipe pipe = crtc->pipe;
2414 uint32_t val;
580d3811 2415
a8f327fb
VS
2416 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2417 if (reset)
2418 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2419 else
2420 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2421 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2422
a8f327fb
VS
2423 if (crtc->config->lane_count > 2) {
2424 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2425 if (reset)
2426 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2427 else
2428 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2429 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2430 }
580d3811 2431
97fd4d5c 2432 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2433 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2434 if (reset)
2435 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2436 else
2437 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2438 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2439
a8f327fb 2440 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2442 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2443 if (reset)
2444 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2445 else
2446 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2447 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2448 }
a8f327fb 2449}
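/*
 * chv_data_lane_soft_reset() above asserts (reset == true) or releases the
 * TX lane and PCS clock soft resets on the first PCS channel, and also on
 * the second channel when more than two lanes are in use.
 */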
97fd4d5c 2450
a8f327fb
VS
2451static void chv_post_disable_dp(struct intel_encoder *encoder)
2452{
2453 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2454 struct drm_device *dev = encoder->base.dev;
2455 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2456
a8f327fb
VS
2457 intel_dp_link_down(intel_dp);
2458
2459 mutex_lock(&dev_priv->sb_lock);
2460
2461 /* Assert data lane reset */
2462 chv_data_lane_soft_reset(encoder, true);
580d3811 2463
a580516d 2464 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2465}
2466
7b13b58a
VS
2467static void
2468_intel_dp_set_link_train(struct intel_dp *intel_dp,
2469 uint32_t *DP,
2470 uint8_t dp_train_pat)
2471{
2472 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2473 struct drm_device *dev = intel_dig_port->base.base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 enum port port = intel_dig_port->port;
2476
2477 if (HAS_DDI(dev)) {
2478 uint32_t temp = I915_READ(DP_TP_CTL(port));
2479
2480 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2481 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2482 else
2483 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2484
2485 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2486 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2487 case DP_TRAINING_PATTERN_DISABLE:
2488 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2489
2490 break;
2491 case DP_TRAINING_PATTERN_1:
2492 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2493 break;
2494 case DP_TRAINING_PATTERN_2:
2495 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2496 break;
2497 case DP_TRAINING_PATTERN_3:
2498 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2499 break;
2500 }
2501 I915_WRITE(DP_TP_CTL(port), temp);
2502
39e5fa88
VS
2503 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2504 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2505 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2506
2507 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2508 case DP_TRAINING_PATTERN_DISABLE:
2509 *DP |= DP_LINK_TRAIN_OFF_CPT;
2510 break;
2511 case DP_TRAINING_PATTERN_1:
2512 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2513 break;
2514 case DP_TRAINING_PATTERN_2:
2515 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2516 break;
2517 case DP_TRAINING_PATTERN_3:
2518 DRM_ERROR("DP training pattern 3 not supported\n");
2519 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2520 break;
2521 }
2522
2523 } else {
2524 if (IS_CHERRYVIEW(dev))
2525 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2526 else
2527 *DP &= ~DP_LINK_TRAIN_MASK;
2528
2529 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2530 case DP_TRAINING_PATTERN_DISABLE:
2531 *DP |= DP_LINK_TRAIN_OFF;
2532 break;
2533 case DP_TRAINING_PATTERN_1:
2534 *DP |= DP_LINK_TRAIN_PAT_1;
2535 break;
2536 case DP_TRAINING_PATTERN_2:
2537 *DP |= DP_LINK_TRAIN_PAT_2;
2538 break;
2539 case DP_TRAINING_PATTERN_3:
2540 if (IS_CHERRYVIEW(dev)) {
2541 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2542 } else {
2543 DRM_ERROR("DP training pattern 3 not supported\n");
2544 *DP |= DP_LINK_TRAIN_PAT_2;
2545 }
2546 break;
2547 }
2548 }
2549}
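/*
 * Three register layouts are handled above: DDI platforms program the
 * training pattern through DP_TP_CTL, gen7 port A (IVB CPU eDP) and CPT PCH
 * ports use the _CPT link training bits in the port register, and everything
 * else (including VLV/CHV) uses the original bit layout.
 */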
2550
2551static void intel_dp_enable_port(struct intel_dp *intel_dp)
2552{
2553 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2554 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2555 struct intel_crtc *crtc =
2556 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2557
7b13b58a
VS
2558 /* enable with pattern 1 (as per spec) */
2559 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2560 DP_TRAINING_PATTERN_1);
2561
2562 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2563 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2564
2565 /*
2566 * Magic for VLV/CHV. We _must_ first set up the register
2567 * without actually enabling the port, and then do another
2568 * write to enable the port. Otherwise link training will
2569 * fail when the power sequencer is freshly used for this port.
2570 */
2571 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2572 if (crtc->config->has_audio)
2573 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2574
2575 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2576 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2577}
2578
e8cb4558 2579static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2580{
e8cb4558
DV
2581 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2582 struct drm_device *dev = encoder->base.dev;
2583 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2584 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2585 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2586 enum port port = dp_to_dig_port(intel_dp)->port;
2587 enum pipe pipe = crtc->pipe;
5d613501 2588
0c33d8d7
DV
2589 if (WARN_ON(dp_reg & DP_PORT_EN))
2590 return;
5d613501 2591
093e3f13
VS
2592 pps_lock(intel_dp);
2593
2594 if (IS_VALLEYVIEW(dev))
2595 vlv_init_panel_power_sequencer(intel_dp);
2596
7b13b58a 2597 intel_dp_enable_port(intel_dp);
093e3f13 2598
d6fbdd15
VS
2599 if (port == PORT_A && IS_GEN5(dev_priv)) {
2600 /*
2601 * Underrun reporting for the other pipe was disabled in
2602 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2603 * enabled, so it's now safe to re-enable underrun reporting.
2604 */
2605 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2606 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2607 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2608 }
2609
093e3f13
VS
2610 edp_panel_vdd_on(intel_dp);
2611 edp_panel_on(intel_dp);
2612 edp_panel_vdd_off(intel_dp, true);
2613
2614 pps_unlock(intel_dp);
2615
e0fce78f
VS
2616 if (IS_VALLEYVIEW(dev)) {
2617 unsigned int lane_mask = 0x0;
2618
2619 if (IS_CHERRYVIEW(dev))
2620 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2621
9b6de0a1
VS
2622 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2623 lane_mask);
e0fce78f 2624 }
61234fa5 2625
f01eca2e 2626 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2627 intel_dp_start_link_train(intel_dp);
3ab9c637 2628 intel_dp_stop_link_train(intel_dp);
c1dec79a 2629
6e3c9717 2630 if (crtc->config->has_audio) {
c1dec79a 2631 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2632 pipe_name(pipe));
c1dec79a
JN
2633 intel_audio_codec_enable(encoder);
2634 }
ab1f90f9 2635}
89b667f8 2636
ecff4f3b
JN
2637static void g4x_enable_dp(struct intel_encoder *encoder)
2638{
828f5c6e
JN
2639 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2640
ecff4f3b 2641 intel_enable_dp(encoder);
4be73780 2642 intel_edp_backlight_on(intel_dp);
ab1f90f9 2643}
89b667f8 2644
ab1f90f9
JN
2645static void vlv_enable_dp(struct intel_encoder *encoder)
2646{
828f5c6e
JN
2647 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2648
4be73780 2649 intel_edp_backlight_on(intel_dp);
b32c6f48 2650 intel_psr_enable(intel_dp);
d240f20f
JB
2651}
2652
ecff4f3b 2653static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2654{
d6fbdd15 2655 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2656 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2657 enum port port = dp_to_dig_port(intel_dp)->port;
2658 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2659
8ac33ed3
DV
2660 intel_dp_prepare(encoder);
2661
d6fbdd15
VS
2662 if (port == PORT_A && IS_GEN5(dev_priv)) {
2663 /*
2664 * We get FIFO underruns on the other pipe when
2665 * enabling the CPU eDP PLL, and when enabling CPU
2666 * eDP port. We could potentially avoid the PLL
2667 * underrun with a vblank wait just prior to enabling
2668 * the PLL, but that doesn't appear to help the port
2669 * enable case. Just sweep it all under the rug.
2670 */
2671 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2672 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2673 }
2674
d41f1efb 2675 /* Only ilk+ has port A */
d6fbdd15 2676 if (port == PORT_A) {
d41f1efb 2677 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2678 ironlake_edp_pll_on(intel_dp);
d41f1efb 2679 }
ab1f90f9
JN
2680}
2681
83b84597
VS
2682static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2683{
2684 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2685 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2686 enum pipe pipe = intel_dp->pps_pipe;
2687 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2688
2689 edp_panel_vdd_off_sync(intel_dp);
2690
2691 /*
2692 * VLV seems to get confused when multiple power sequencers
2693 * have the same port selected (even if only one has power/vdd
2694 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2695 * CHV, on the other hand, doesn't seem to mind having the same port
2696 * selected in multiple power sequencers, but let's always clear the
2697 * port select when logically disconnecting a power sequencer
2698 * from a port.
2699 */
2700 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2701 pipe_name(pipe), port_name(intel_dig_port->port));
2702 I915_WRITE(pp_on_reg, 0);
2703 POSTING_READ(pp_on_reg);
2704
2705 intel_dp->pps_pipe = INVALID_PIPE;
2706}
2707
a4a5d2f8
VS
2708static void vlv_steal_power_sequencer(struct drm_device *dev,
2709 enum pipe pipe)
2710{
2711 struct drm_i915_private *dev_priv = dev->dev_private;
2712 struct intel_encoder *encoder;
2713
2714 lockdep_assert_held(&dev_priv->pps_mutex);
2715
ac3c12e4
VS
2716 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2717 return;
2718
a4a5d2f8
VS
2719 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2720 base.head) {
2721 struct intel_dp *intel_dp;
773538e8 2722 enum port port;
a4a5d2f8
VS
2723
2724 if (encoder->type != INTEL_OUTPUT_EDP)
2725 continue;
2726
2727 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2728 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2729
2730 if (intel_dp->pps_pipe != pipe)
2731 continue;
2732
2733 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2734 pipe_name(pipe), port_name(port));
a4a5d2f8 2735
e02f9a06 2736 WARN(encoder->base.crtc,
034e43c6
VS
2737 "stealing pipe %c power sequencer from active eDP port %c\n",
2738 pipe_name(pipe), port_name(port));
a4a5d2f8 2739
a4a5d2f8 2740 /* make sure vdd is off before we steal it */
83b84597 2741 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2742 }
2743}
2744
2745static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2746{
2747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2748 struct intel_encoder *encoder = &intel_dig_port->base;
2749 struct drm_device *dev = encoder->base.dev;
2750 struct drm_i915_private *dev_priv = dev->dev_private;
2751 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2752
2753 lockdep_assert_held(&dev_priv->pps_mutex);
2754
093e3f13
VS
2755 if (!is_edp(intel_dp))
2756 return;
2757
a4a5d2f8
VS
2758 if (intel_dp->pps_pipe == crtc->pipe)
2759 return;
2760
2761 /*
2762 * If another power sequencer was being used on this
2763 * port previously make sure to turn off vdd there while
2764 * we still have control of it.
2765 */
2766 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2767 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2768
2769 /*
2770 * We may be stealing the power
2771 * sequencer from another port.
2772 */
2773 vlv_steal_power_sequencer(dev, crtc->pipe);
2774
2775 /* now it's all ours */
2776 intel_dp->pps_pipe = crtc->pipe;
2777
2778 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2779 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2780
2781 /* init power sequencer on this pipe and port */
36b5f425
VS
2782 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2783 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2784}
2785
ab1f90f9 2786static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2787{
2bd2ad64 2788 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2789 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2790 struct drm_device *dev = encoder->base.dev;
89b667f8 2791 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2792 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2793 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2794 int pipe = intel_crtc->pipe;
2795 u32 val;
a4fc5ed6 2796
a580516d 2797 mutex_lock(&dev_priv->sb_lock);
89b667f8 2798
ab3c759a 2799 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2800 val = 0;
2801 if (pipe)
2802 val |= (1<<21);
2803 else
2804 val &= ~(1<<21);
2805 val |= 0x001000c4;
ab3c759a
CML
2806 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2809
a580516d 2810 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2811
2812 intel_enable_dp(encoder);
89b667f8
JB
2813}
2814
ecff4f3b 2815static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2816{
2817 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2818 struct drm_device *dev = encoder->base.dev;
2819 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2820 struct intel_crtc *intel_crtc =
2821 to_intel_crtc(encoder->base.crtc);
e4607fcf 2822 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2823 int pipe = intel_crtc->pipe;
89b667f8 2824
8ac33ed3
DV
2825 intel_dp_prepare(encoder);
2826
89b667f8 2827 /* Program Tx lane resets to default */
a580516d 2828 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2829 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2830 DPIO_PCS_TX_LANE2_RESET |
2831 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2833 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2834 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2835 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2836 DPIO_PCS_CLK_SOFT_RESET);
2837
2838 /* Fix up inter-pair skew failure */
ab3c759a
CML
2839 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2840 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2841 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2842 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2843}
2844
e4a1d846
CML
2845static void chv_pre_enable_dp(struct intel_encoder *encoder)
2846{
2847 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2848 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2849 struct drm_device *dev = encoder->base.dev;
2850 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2851 struct intel_crtc *intel_crtc =
2852 to_intel_crtc(encoder->base.crtc);
2853 enum dpio_channel ch = vlv_dport_to_channel(dport);
2854 int pipe = intel_crtc->pipe;
2e523e98 2855 int data, i, stagger;
949c1d43 2856 u32 val;
e4a1d846 2857
a580516d 2858 mutex_lock(&dev_priv->sb_lock);
949c1d43 2859
570e2a74
VS
2860 /* allow hardware to manage TX FIFO reset source */
2861 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2862 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2863 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2864
e0fce78f
VS
2865 if (intel_crtc->config->lane_count > 2) {
2866 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2867 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2869 }
570e2a74 2870
949c1d43 2871 /* Program Tx lane latency optimal setting*/
e0fce78f 2872 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2873 /* Set the upar bit */
e0fce78f
VS
2874 if (intel_crtc->config->lane_count == 1)
2875 data = 0x0;
2876 else
2877 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2878 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2879 data << DPIO_UPAR_SHIFT);
2880 }
2881
2882 /* Data lane stagger programming */
2e523e98
VS
2883 if (intel_crtc->config->port_clock > 270000)
2884 stagger = 0x18;
2885 else if (intel_crtc->config->port_clock > 135000)
2886 stagger = 0xd;
2887 else if (intel_crtc->config->port_clock > 67500)
2888 stagger = 0x7;
2889 else if (intel_crtc->config->port_clock > 33750)
2890 stagger = 0x4;
2891 else
2892 stagger = 0x2;
2893
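/*
 * Worked example: 1.62GHz and 2.7GHz links (port_clock 162000 and 270000)
 * both end up with stagger = 0xd, while a 5.4GHz HBR2 link (540000) takes
 * the first branch and gets stagger = 0x18.
 */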
2894 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2895 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2897
e0fce78f
VS
2898 if (intel_crtc->config->lane_count > 2) {
2899 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2900 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2901 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2902 }
2e523e98
VS
2903
2904 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2905 DPIO_LANESTAGGER_STRAP(stagger) |
2906 DPIO_LANESTAGGER_STRAP_OVRD |
2907 DPIO_TX1_STAGGER_MASK(0x1f) |
2908 DPIO_TX1_STAGGER_MULT(6) |
2909 DPIO_TX2_STAGGER_MULT(0));
2910
e0fce78f
VS
2911 if (intel_crtc->config->lane_count > 2) {
2912 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2913 DPIO_LANESTAGGER_STRAP(stagger) |
2914 DPIO_LANESTAGGER_STRAP_OVRD |
2915 DPIO_TX1_STAGGER_MASK(0x1f) |
2916 DPIO_TX1_STAGGER_MULT(7) |
2917 DPIO_TX2_STAGGER_MULT(5));
2918 }
e4a1d846 2919
a8f327fb
VS
2920 /* Deassert data lane reset */
2921 chv_data_lane_soft_reset(encoder, false);
2922
a580516d 2923 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2924
e4a1d846 2925 intel_enable_dp(encoder);
b0b33846
VS
2926
2927 /* Second common lane will stay alive on its own now */
2928 if (dport->release_cl2_override) {
2929 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2930 dport->release_cl2_override = false;
2931 }
e4a1d846
CML
2932}
2933
9197c88b
VS
2934static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2935{
2936 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2937 struct drm_device *dev = encoder->base.dev;
2938 struct drm_i915_private *dev_priv = dev->dev_private;
2939 struct intel_crtc *intel_crtc =
2940 to_intel_crtc(encoder->base.crtc);
2941 enum dpio_channel ch = vlv_dport_to_channel(dport);
2942 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
2943 unsigned int lane_mask =
2944 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
2945 u32 val;
2946
625695f8
VS
2947 intel_dp_prepare(encoder);
2948
b0b33846
VS
2949 /*
2950 * Must trick the second common lane into life.
2951 * Otherwise we can't even access the PLL.
2952 */
2953 if (ch == DPIO_CH0 && pipe == PIPE_B)
2954 dport->release_cl2_override =
2955 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2956
e0fce78f
VS
2957 chv_phy_powergate_lanes(encoder, true, lane_mask);
2958
a580516d 2959 mutex_lock(&dev_priv->sb_lock);
9197c88b 2960
a8f327fb
VS
2961 /* Assert data lane reset */
2962 chv_data_lane_soft_reset(encoder, true);
2963
b9e5ac3c
VS
2964 /* program left/right clock distribution */
2965 if (pipe != PIPE_B) {
2966 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2967 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2968 if (ch == DPIO_CH0)
2969 val |= CHV_BUFLEFTENA1_FORCE;
2970 if (ch == DPIO_CH1)
2971 val |= CHV_BUFRIGHTENA1_FORCE;
2972 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2973 } else {
2974 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2975 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2976 if (ch == DPIO_CH0)
2977 val |= CHV_BUFLEFTENA2_FORCE;
2978 if (ch == DPIO_CH1)
2979 val |= CHV_BUFRIGHTENA2_FORCE;
2980 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2981 }
2982
9197c88b
VS
2983 /* program clock channel usage */
2984 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2985 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2986 if (pipe != PIPE_B)
2987 val &= ~CHV_PCS_USEDCLKCHANNEL;
2988 else
2989 val |= CHV_PCS_USEDCLKCHANNEL;
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2991
e0fce78f
VS
2992 if (intel_crtc->config->lane_count > 2) {
2993 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2994 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2995 if (pipe != PIPE_B)
2996 val &= ~CHV_PCS_USEDCLKCHANNEL;
2997 else
2998 val |= CHV_PCS_USEDCLKCHANNEL;
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3000 }
9197c88b
VS
3001
3002 /*
3003 * This is a bit weird since generally CL
3004 * matches the pipe, but here we need to
3005 * pick the CL based on the port.
3006 */
3007 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3008 if (pipe != PIPE_B)
3009 val &= ~CHV_CMN_USEDCLKCHANNEL;
3010 else
3011 val |= CHV_CMN_USEDCLKCHANNEL;
3012 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3013
a580516d 3014 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3015}
3016
d6db995f
VS
3017static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3018{
3019 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3020 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3021 u32 val;
3022
3023 mutex_lock(&dev_priv->sb_lock);
3024
3025 /* disable left/right clock distribution */
3026 if (pipe != PIPE_B) {
3027 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3028 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3029 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3030 } else {
3031 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3032 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3033 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3034 }
3035
3036 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3037
b0b33846
VS
3038 /*
3039 * Leave the power down bit cleared for at least one
3040 * lane so that chv_phy_powergate_ch() will power
3041 * on something when the channel is otherwise unused.
3042 * When the port is off and the override is removed
3043 * the lanes power down anyway, so otherwise it doesn't
3044 * really matter what the state of power down bits is
3045 * after this.
3046 */
e0fce78f 3047 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3048}
3049
a4fc5ed6 3050/*
df0c237d
JB
3051 * Native read with retry for link status and receiver capability reads for
3052 * cases where the sink may still be asleep.
9d1a1031
JN
3053 *
3054 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3055 * supposed to retry 3 times per the spec.
a4fc5ed6 3056 */
9d1a1031
JN
3057static ssize_t
3058intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3059 void *buffer, size_t size)
a4fc5ed6 3060{
9d1a1031
JN
3061 ssize_t ret;
3062 int i;
61da5fab 3063
f6a19066
VS
3064 /*
3065 * Sometimes we just get the same incorrect byte repeated
3066 * over the entire buffer. Doing just one throw-away read
3067 * initially seems to "solve" it.
3068 */
3069 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3070
61da5fab 3071 for (i = 0; i < 3; i++) {
9d1a1031
JN
3072 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3073 if (ret == size)
3074 return ret;
61da5fab
JB
3075 msleep(1);
3076 }
a4fc5ed6 3077
9d1a1031 3078 return ret;
a4fc5ed6
KP
3079}
3080
3081/*
3082 * Fetch AUX CH registers 0x202 - 0x207 which contain
3083 * link status information
3084 */
94223d04 3085bool
93f62dad 3086intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3087{
9d1a1031
JN
3088 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3089 DP_LANE0_1_STATUS,
3090 link_status,
3091 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3092}
3093
1100244e 3094/* These are source-specific values. */
94223d04 3095uint8_t
1a2eb460 3096intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3097{
30add22d 3098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3099 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3100 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3101
9314726b
VK
3102 if (IS_BROXTON(dev))
3103 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3104 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3105 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3106 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3107 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3108 } else if (IS_VALLEYVIEW(dev))
bd60018a 3109 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3110 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3111 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3112 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3113 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3114 else
bd60018a 3115 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3116}
3117
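/*
 * intel_dp_voltage_max() above and intel_dp_pre_emphasis_max() below
 * describe the source (platform) side limits, which the link training code
 * uses to cap the drive levels requested by the sink.
 */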
94223d04 3118uint8_t
1a2eb460
KP
3119intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3120{
30add22d 3121 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3122 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3123
5a9d1f1a
DL
3124 if (INTEL_INFO(dev)->gen >= 9) {
3125 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3127 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3129 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3134 default:
3135 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3136 }
3137 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3138 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3144 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3146 default:
bd60018a 3147 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3148 }
e2fa6fba
P
3149 } else if (IS_VALLEYVIEW(dev)) {
3150 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3152 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3153 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3154 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3156 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3158 default:
bd60018a 3159 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3160 }
bc7d38a4 3161 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3162 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3164 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3165 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3167 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3168 default:
bd60018a 3169 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3170 }
3171 } else {
3172 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3176 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3177 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3178 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3180 default:
bd60018a 3181 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3182 }
a4fc5ed6
KP
3183 }
3184}
3185
5829975c 3186static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3187{
3188 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3191 struct intel_crtc *intel_crtc =
3192 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3193 unsigned long demph_reg_value, preemph_reg_value,
3194 uniqtranscale_reg_value;
3195 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3196 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3197 int pipe = intel_crtc->pipe;
e2fa6fba
P
3198
3199 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3200 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3201 preemph_reg_value = 0x0004000;
3202 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3203 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3204 demph_reg_value = 0x2B405555;
3205 uniqtranscale_reg_value = 0x552AB83A;
3206 break;
bd60018a 3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3208 demph_reg_value = 0x2B404040;
3209 uniqtranscale_reg_value = 0x5548B83A;
3210 break;
bd60018a 3211 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3212 demph_reg_value = 0x2B245555;
3213 uniqtranscale_reg_value = 0x5560B83A;
3214 break;
bd60018a 3215 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3216 demph_reg_value = 0x2B405555;
3217 uniqtranscale_reg_value = 0x5598DA3A;
3218 break;
3219 default:
3220 return 0;
3221 }
3222 break;
bd60018a 3223 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3224 preemph_reg_value = 0x0002000;
3225 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3227 demph_reg_value = 0x2B404040;
3228 uniqtranscale_reg_value = 0x5552B83A;
3229 break;
bd60018a 3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3231 demph_reg_value = 0x2B404848;
3232 uniqtranscale_reg_value = 0x5580B83A;
3233 break;
bd60018a 3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3235 demph_reg_value = 0x2B404040;
3236 uniqtranscale_reg_value = 0x55ADDA3A;
3237 break;
3238 default:
3239 return 0;
3240 }
3241 break;
bd60018a 3242 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3243 preemph_reg_value = 0x0000000;
3244 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3246 demph_reg_value = 0x2B305555;
3247 uniqtranscale_reg_value = 0x5570B83A;
3248 break;
bd60018a 3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3250 demph_reg_value = 0x2B2B4040;
3251 uniqtranscale_reg_value = 0x55ADDA3A;
3252 break;
3253 default:
3254 return 0;
3255 }
3256 break;
bd60018a 3257 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3258 preemph_reg_value = 0x0006000;
3259 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3261 demph_reg_value = 0x1B405555;
3262 uniqtranscale_reg_value = 0x55ADDA3A;
3263 break;
3264 default:
3265 return 0;
3266 }
3267 break;
3268 default:
3269 return 0;
3270 }
3271
a580516d 3272 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3274 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3275 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3276 uniqtranscale_reg_value);
ab3c759a
CML
3277 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3278 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3279 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3280 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3281 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3282
3283 return 0;
3284}
3285
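/*
 * Added note (not in the original source): the unique transition scale
 * is only needed for the maximum vswing / minimum pre-emphasis
 * combination (swing level 3, pre-emphasis level 0).
 */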
67fa24b4
VS
3286static bool chv_need_uniq_trans_scale(uint8_t train_set)
3287{
3288 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3289 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3290}
3291
5829975c 3292static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3293{
3294 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3295 struct drm_i915_private *dev_priv = dev->dev_private;
3296 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3297 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3298 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3299 uint8_t train_set = intel_dp->train_set[0];
3300 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3301 enum pipe pipe = intel_crtc->pipe;
3302 int i;
e4a1d846
CML
3303
3304 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3305 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3306 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3308 deemph_reg_value = 128;
3309 margin_reg_value = 52;
3310 break;
bd60018a 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3312 deemph_reg_value = 128;
3313 margin_reg_value = 77;
3314 break;
bd60018a 3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3316 deemph_reg_value = 128;
3317 margin_reg_value = 102;
3318 break;
bd60018a 3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3320 deemph_reg_value = 128;
3321 margin_reg_value = 154;
3322 /* FIXME extra to set for 1200 */
3323 break;
3324 default:
3325 return 0;
3326 }
3327 break;
bd60018a 3328 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3329 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3331 deemph_reg_value = 85;
3332 margin_reg_value = 78;
3333 break;
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3335 deemph_reg_value = 85;
3336 margin_reg_value = 116;
3337 break;
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3339 deemph_reg_value = 85;
3340 margin_reg_value = 154;
3341 break;
3342 default:
3343 return 0;
3344 }
3345 break;
bd60018a 3346 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3347 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3349 deemph_reg_value = 64;
3350 margin_reg_value = 104;
3351 break;
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3353 deemph_reg_value = 64;
3354 margin_reg_value = 154;
3355 break;
3356 default:
3357 return 0;
3358 }
3359 break;
bd60018a 3360 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3361 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3363 deemph_reg_value = 43;
3364 margin_reg_value = 154;
3365 break;
3366 default:
3367 return 0;
3368 }
3369 break;
3370 default:
3371 return 0;
3372 }
3373
a580516d 3374 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3375
3376 /* Clear calc init */
1966e59e
VS
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3382
e0fce78f
VS
3383 if (intel_crtc->config->lane_count > 2) {
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3385 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3386 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3387 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3388 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3389 }
e4a1d846 3390
a02ef3c7
VS
3391 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3392 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3393 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3394 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3395
e0fce78f
VS
3396 if (intel_crtc->config->lane_count > 2) {
3397 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3398 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3399 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3400 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3401 }
a02ef3c7 3402
e4a1d846 3403 /* Program swing deemph */
e0fce78f 3404 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3405 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3406 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3407 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3408 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3409 }
e4a1d846
CML
3410
3411 /* Program swing margin */
e0fce78f 3412 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3413 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3414
1fb44505
VS
3415 val &= ~DPIO_SWING_MARGIN000_MASK;
3416 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3417
3418 /*
3419 * Supposedly this value shouldn't matter when unique transition
3420 * scale is disabled, but in fact it does matter. Let's just
3421 * always program the same value and hope it's OK.
3422 */
3423 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3424 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3425
f72df8db
VS
3426 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3427 }
e4a1d846 3428
67fa24b4
VS
3429 /*
3430 * The document said it needs to set bit 27 for ch0 and bit 26
3431 * for ch1. Might be a typo in the doc.
3432 * For now, for this unique transition scale selection, set bit
3433 * 27 for ch0 and ch1.
3434 */
e0fce78f 3435 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3436 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3437 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3438 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3439 else
3440 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3441 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3442 }
3443
3444 /* Start swing calculation */
1966e59e
VS
3445 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3446 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3447 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3448
e0fce78f
VS
3449 if (intel_crtc->config->lane_count > 2) {
3450 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3451 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3452 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3453 }
e4a1d846 3454
a580516d 3455 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3456
3457 return 0;
3458}
3459
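/*
 * Added note (not in the original source): map the requested
 * vswing/pre-emphasis level onto the DP_VOLTAGE_* and DP_PRE_EMPHASIS_*
 * bits of the DP port register used on gen4-style platforms.
 */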
a4fc5ed6 3460static uint32_t
5829975c 3461gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3462{
3cf2efb1 3463 uint32_t signal_levels = 0;
a4fc5ed6 3464
3cf2efb1 3465 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3467 default:
3468 signal_levels |= DP_VOLTAGE_0_4;
3469 break;
bd60018a 3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3471 signal_levels |= DP_VOLTAGE_0_6;
3472 break;
bd60018a 3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3474 signal_levels |= DP_VOLTAGE_0_8;
3475 break;
bd60018a 3476 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3477 signal_levels |= DP_VOLTAGE_1_2;
3478 break;
3479 }
3cf2efb1 3480 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3481 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3482 default:
3483 signal_levels |= DP_PRE_EMPHASIS_0;
3484 break;
bd60018a 3485 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3486 signal_levels |= DP_PRE_EMPHASIS_3_5;
3487 break;
bd60018a 3488 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3489 signal_levels |= DP_PRE_EMPHASIS_6;
3490 break;
bd60018a 3491 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3492 signal_levels |= DP_PRE_EMPHASIS_9_5;
3493 break;
3494 }
3495 return signal_levels;
3496}
3497
e3421a18
ZW
3498/* Gen6's DP voltage swing and pre-emphasis control */
3499static uint32_t
5829975c 3500gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3501{
3c5a62b5
YL
3502 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3503 DP_TRAIN_PRE_EMPHASIS_MASK);
3504 switch (signal_levels) {
bd60018a
SJ
3505 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3507 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3508 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3509 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3510 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3512 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3513 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3514 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3515 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3516 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3517 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3518 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3519 default:
3c5a62b5
YL
3520		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3521 "0x%x\n", signal_levels);
3522 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3523 }
3524}
3525
1a2eb460
KP
3526/* Gen7's DP voltage swing and pre-emphasis control */
3527static uint32_t
5829975c 3528gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3529{
3530 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3531 DP_TRAIN_PRE_EMPHASIS_MASK);
3532 switch (signal_levels) {
bd60018a 3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3534 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3535 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3536 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3537 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3538 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3539
bd60018a 3540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3541 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3543 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3544
bd60018a 3545 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3546 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3547 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3548 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3549
3550 default:
3551		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3552 "0x%x\n", signal_levels);
3553 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3554 }
3555}
3556
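/*
 * Added note (not in the original source): compute the platform specific
 * signal levels for the current train_set, merge them into intel_dp->DP
 * under the platform's vswing/pre-emphasis mask and write the result to
 * the port register.
 */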
94223d04 3557void
f4eb692e 3558intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3559{
3560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3561 enum port port = intel_dig_port->port;
f0a3424e 3562 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3563 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3564 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3565 uint8_t train_set = intel_dp->train_set[0];
3566
f8896f5d
DW
3567 if (HAS_DDI(dev)) {
3568 signal_levels = ddi_signal_levels(intel_dp);
3569
3570 if (IS_BROXTON(dev))
3571 signal_levels = 0;
3572 else
3573 mask = DDI_BUF_EMP_MASK;
e4a1d846 3574 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3575 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3576 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3577 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3578 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3579 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3580 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3581 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3582 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3583 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3584 } else {
5829975c 3585 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3586 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3587 }
3588
96fb9f9b
VK
3589 if (mask)
3590 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3591
3592 DRM_DEBUG_KMS("Using vswing level %d\n",
3593 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3594 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3595 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3596 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3597
f4eb692e 3598 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3599
3600 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3601 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3602}
3603
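/*
 * Added note (not in the original source): select the requested link
 * training pattern and write it out to the port register.
 */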
94223d04 3604void
e9c176d5
ACO
3605intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3606 uint8_t dp_train_pat)
a4fc5ed6 3607{
174edf1f 3608 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3609 struct drm_i915_private *dev_priv =
3610 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3611
f4eb692e 3612 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3613
f4eb692e 3614 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3615 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3616}
3617
94223d04 3618void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3619{
3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 struct drm_device *dev = intel_dig_port->base.base.dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private;
3623 enum port port = intel_dig_port->port;
3624 uint32_t val;
3625
3626 if (!HAS_DDI(dev))
3627 return;
3628
3629 val = I915_READ(DP_TP_CTL(port));
3630 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3631 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3632 I915_WRITE(DP_TP_CTL(port), val);
3633
3634 /*
3636	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3636	 * we need to set idle transmission mode is to work around a HW issue
3637	 * where we enable the pipe while not in idle link-training mode.
3638	 * In this case there is a requirement to wait for a minimum number of
3639 * idle patterns to be sent.
3640 */
3641 if (port == PORT_A)
3642 return;
3643
3644 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3645 1))
3646 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3647}
3648
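/*
 * Added note (not in the original source): drop the link to idle/training
 * pattern 1 and turn the port off, applying the IBX transcoder A
 * workaround where needed. Not used on DDI platforms (see the WARN_ON
 * below).
 */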
a4fc5ed6 3649static void
ea5b213a 3650intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3651{
da63a9f2 3652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3653 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3654 enum port port = intel_dig_port->port;
da63a9f2 3655 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3656 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3657 uint32_t DP = intel_dp->DP;
a4fc5ed6 3658
bc76e320 3659 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3660 return;
3661
0c33d8d7 3662 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3663 return;
3664
28c97730 3665 DRM_DEBUG_KMS("\n");
32f9d658 3666
39e5fa88
VS
3667 if ((IS_GEN7(dev) && port == PORT_A) ||
3668 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3669 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3670 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3671 } else {
aad3d14d
VS
3672 if (IS_CHERRYVIEW(dev))
3673 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3674 else
3675 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3676 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3677 }
1612c8bd 3678 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3679 POSTING_READ(intel_dp->output_reg);
5eb08b69 3680
1612c8bd
VS
3681 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3682 I915_WRITE(intel_dp->output_reg, DP);
3683 POSTING_READ(intel_dp->output_reg);
3684
3685 /*
3686 * HW workaround for IBX, we need to move the port
3687 * to transcoder A after disabling it to allow the
3688 * matching HDMI port to be enabled on transcoder A.
3689 */
3690 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3691 /*
3692 * We get CPU/PCH FIFO underruns on the other pipe when
3693 * doing the workaround. Sweep them under the rug.
3694 */
3695 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3696 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3697
1612c8bd
VS
3698 /* always enable with pattern 1 (as per spec) */
3699 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3700 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3701 I915_WRITE(intel_dp->output_reg, DP);
3702 POSTING_READ(intel_dp->output_reg);
3703
3704 DP &= ~DP_PORT_EN;
5bddd17f 3705 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3706 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3707
3708 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3709 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3710 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3711 }
3712
f01eca2e 3713 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3714
3715 intel_dp->DP = DP;
a4fc5ed6
KP
3716}
3717
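/*
 * Added note (not in the original source): read and cache the sink's DPCD
 * receiver capabilities. Also probes PSR/PSR2 support on eDP, parses the
 * eDP 1.4 supported link rate table into sink_rates[], and fetches
 * downstream port info for branch devices. Returns false if the AUX
 * transfer fails or no DPCD is present.
 */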
26d61aad
KP
3718static bool
3719intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3720{
a031d709
RV
3721 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3722 struct drm_device *dev = dig_port->base.base.dev;
3723 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3724 uint8_t rev;
a031d709 3725
9d1a1031
JN
3726 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3727 sizeof(intel_dp->dpcd)) < 0)
edb39244 3728 return false; /* aux transfer failed */
92fd8fd1 3729
a8e98153 3730 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3731
edb39244
AJ
3732 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3733 return false; /* DPCD not present */
3734
2293bb5c
SK
3735 /* Check if the panel supports PSR */
3736 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3737 if (is_edp(intel_dp)) {
9d1a1031
JN
3738 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3739 intel_dp->psr_dpcd,
3740 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3741 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3742 dev_priv->psr.sink_support = true;
50003939 3743 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3744 }
474d1ec4
SJ
3745
3746 if (INTEL_INFO(dev)->gen >= 9 &&
3747 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3748 uint8_t frame_sync_cap;
3749
3750 dev_priv->psr.sink_support = true;
3751 intel_dp_dpcd_read_wake(&intel_dp->aux,
3752 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3753 &frame_sync_cap, 1);
3754 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3755 /* PSR2 needs frame sync as well */
3756 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3757 DRM_DEBUG_KMS("PSR2 %s on sink",
3758 dev_priv->psr.psr2_support ? "supported" : "not supported");
3759 }
50003939
JN
3760 }
3761
bc5133d5 3762 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3763 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3764 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3765
fc0f8e25
SJ
3766 /* Intermediate frequency support */
3767 if (is_edp(intel_dp) &&
3768 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3769 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3770 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3771 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3772 int i;
3773
fc0f8e25
SJ
3774 intel_dp_dpcd_read_wake(&intel_dp->aux,
3775 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3776 sink_rates,
3777 sizeof(sink_rates));
ea2d8a42 3778
94ca719e
VS
3779 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3780 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3781
3782 if (val == 0)
3783 break;
3784
af77b974
SJ
3785			/* Value read is in units of 200 kHz while drm clock is saved in deca-kHz */
3786 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3787 }
94ca719e 3788 intel_dp->num_sink_rates = i;
fc0f8e25 3789 }
0336400e
VS
3790
3791 intel_dp_print_rates(intel_dp);
3792
edb39244
AJ
3793 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3794 DP_DWN_STRM_PORT_PRESENT))
3795 return true; /* native DP sink */
3796
3797 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3798 return true; /* no per-port downstream info */
3799
9d1a1031
JN
3800 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3801 intel_dp->downstream_ports,
3802 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3803 return false; /* downstream port status fetch failed */
3804
3805 return true;
92fd8fd1
KP
3806}
3807
0d198328
AJ
3808static void
3809intel_dp_probe_oui(struct intel_dp *intel_dp)
3810{
3811 u8 buf[3];
3812
3813 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3814 return;
3815
9d1a1031 3816 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3817 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3818 buf[0], buf[1], buf[2]);
3819
9d1a1031 3820 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3821 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3822 buf[0], buf[1], buf[2]);
3823}
3824
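/*
 * Added note (not in the original source): check whether the sink is MST
 * capable (DPCD 1.2+ with the MST cap bit set) and update
 * intel_dp->is_mst and the topology manager accordingly.
 */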
0e32b39c
DA
3825static bool
3826intel_dp_probe_mst(struct intel_dp *intel_dp)
3827{
3828 u8 buf[1];
3829
3830 if (!intel_dp->can_mst)
3831 return false;
3832
3833 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3834 return false;
3835
0e32b39c
DA
3836 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3837 if (buf[0] & DP_MST_CAP) {
3838 DRM_DEBUG_KMS("Sink is MST capable\n");
3839 intel_dp->is_mst = true;
3840 } else {
3841 DRM_DEBUG_KMS("Sink is not MST capable\n");
3842 intel_dp->is_mst = false;
3843 }
3844 }
0e32b39c
DA
3845
3846 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3847 return intel_dp->is_mst;
3848}
3849
e5a1cab5 3850static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3851{
082dcc7c
RV
3852 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3853 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3854 u8 buf;
e5a1cab5 3855 int ret = 0;
d2e216d0 3856
082dcc7c
RV
3857 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3858 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3859 ret = -EIO;
3860 goto out;
4373f0f2
PZ
3861 }
3862
082dcc7c 3863 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 3864 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 3865 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3866 ret = -EIO;
3867 goto out;
3868 }
d2e216d0 3869
621d4c76 3870 intel_dp->sink_crc.started = false;
e5a1cab5 3871 out:
082dcc7c 3872 hsw_enable_ips(intel_crtc);
e5a1cab5 3873 return ret;
082dcc7c
RV
3874}
3875
3876static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3877{
3878 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3879 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3880 u8 buf;
e5a1cab5
RV
3881 int ret;
3882
621d4c76 3883 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
3884 ret = intel_dp_sink_crc_stop(intel_dp);
3885 if (ret)
3886 return ret;
3887 }
082dcc7c
RV
3888
3889 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3890 return -EIO;
3891
3892 if (!(buf & DP_TEST_CRC_SUPPORTED))
3893 return -ENOTTY;
3894
621d4c76
RV
3895 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3896
082dcc7c
RV
3897 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3898 return -EIO;
3899
3900 hsw_disable_ips(intel_crtc);
1dda5f93 3901
9d1a1031 3902 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
3903 buf | DP_TEST_SINK_START) < 0) {
3904 hsw_enable_ips(intel_crtc);
3905 return -EIO;
4373f0f2
PZ
3906 }
3907
621d4c76 3908 intel_dp->sink_crc.started = true;
082dcc7c
RV
3909 return 0;
3910}
3911
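/*
 * Added note (not in the original source): capture a sink CRC. Start the
 * sink's CRC test, wait across vblanks until a CRC different from the
 * previously read one is reported (up to six attempts), copy it into
 * crc[] and stop the test again.
 */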
3912int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3913{
3914 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3915 struct drm_device *dev = dig_port->base.base.dev;
3916 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3917 u8 buf;
621d4c76 3918 int count, ret;
082dcc7c 3919 int attempts = 6;
aabc95dc 3920 bool old_equal_new;
082dcc7c
RV
3921
3922 ret = intel_dp_sink_crc_start(intel_dp);
3923 if (ret)
3924 return ret;
3925
ad9dc91b 3926 do {
621d4c76
RV
3927 intel_wait_for_vblank(dev, intel_crtc->pipe);
3928
1dda5f93 3929 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
3930 DP_TEST_SINK_MISC, &buf) < 0) {
3931 ret = -EIO;
afe0d67e 3932 goto stop;
4373f0f2 3933 }
621d4c76 3934 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 3935
621d4c76
RV
3936 /*
3937 * Count might be reset during the loop. In this case
3938 * last known count needs to be reset as well.
3939 */
3940 if (count == 0)
3941 intel_dp->sink_crc.last_count = 0;
3942
3943 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3944 ret = -EIO;
3945 goto stop;
3946 }
aabc95dc
RV
3947
3948 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3949 !memcmp(intel_dp->sink_crc.last_crc, crc,
3950 6 * sizeof(u8)));
3951
3952 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
3953
3954 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3955 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
3956
3957 if (attempts == 0) {
aabc95dc
RV
3958 if (old_equal_new) {
3959 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3960 } else {
3961 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3962 ret = -ETIMEDOUT;
3963 goto stop;
3964 }
ad9dc91b 3965 }
d2e216d0 3966
afe0d67e 3967stop:
082dcc7c 3968 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 3969 return ret;
d2e216d0
RV
3970}
3971
a60f0e38
JB
3972static bool
3973intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3974{
9d1a1031
JN
3975 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3976 DP_DEVICE_SERVICE_IRQ_VECTOR,
3977 sink_irq_vector, 1) == 1;
a60f0e38
JB
3978}
3979
0e32b39c
DA
3980static bool
3981intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3982{
3983 int ret;
3984
3985 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3986 DP_SINK_COUNT_ESI,
3987 sink_irq_vector, 14);
3988 if (ret != 14)
3989 return false;
3990
3991 return true;
3992}
3993
c5d5ab7a
TP
3994static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3995{
3996 uint8_t test_result = DP_TEST_ACK;
3997 return test_result;
3998}
3999
4000static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4001{
4002 uint8_t test_result = DP_TEST_NAK;
4003 return test_result;
4004}
4005
4006static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4007{
c5d5ab7a 4008 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4009 struct intel_connector *intel_connector = intel_dp->attached_connector;
4010 struct drm_connector *connector = &intel_connector->base;
4011
4012 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4013 connector->edid_corrupt ||
559be30c
TP
4014 intel_dp->aux.i2c_defer_count > 6) {
4015 /* Check EDID read for NACKs, DEFERs and corruption
4016 * (DP CTS 1.2 Core r1.1)
4017 * 4.2.2.4 : Failed EDID read, I2C_NAK
4018 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4019 * 4.2.2.6 : EDID corruption detected
4020 * Use failsafe mode for all cases
4021 */
4022 if (intel_dp->aux.i2c_nack_count > 0 ||
4023 intel_dp->aux.i2c_defer_count > 0)
4024 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4025 intel_dp->aux.i2c_nack_count,
4026 intel_dp->aux.i2c_defer_count);
4027 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4028 } else {
f79b468e
TS
4029 struct edid *block = intel_connector->detect_edid;
4030
4031 /* We have to write the checksum
4032 * of the last block read
4033 */
4034 block += intel_connector->detect_edid->extensions;
4035
559be30c
TP
4036 if (!drm_dp_dpcd_write(&intel_dp->aux,
4037 DP_TEST_EDID_CHECKSUM,
f79b468e 4038 &block->checksum,
5a1cc655 4039 1))
559be30c
TP
4040 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4041
4042 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4043 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4044 }
4045
4046 /* Set test active flag here so userspace doesn't interrupt things */
4047 intel_dp->compliance_test_active = 1;
4048
c5d5ab7a
TP
4049 return test_result;
4050}
4051
4052static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4053{
c5d5ab7a
TP
4054 uint8_t test_result = DP_TEST_NAK;
4055 return test_result;
4056}
4057
4058static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4059{
4060 uint8_t response = DP_TEST_NAK;
4061 uint8_t rxdata = 0;
4062 int status = 0;
4063
559be30c 4064 intel_dp->compliance_test_active = 0;
c5d5ab7a 4065 intel_dp->compliance_test_type = 0;
559be30c
TP
4066 intel_dp->compliance_test_data = 0;
4067
c5d5ab7a
TP
4068 intel_dp->aux.i2c_nack_count = 0;
4069 intel_dp->aux.i2c_defer_count = 0;
4070
4071 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4072 if (status <= 0) {
4073 DRM_DEBUG_KMS("Could not read test request from sink\n");
4074 goto update_status;
4075 }
4076
4077 switch (rxdata) {
4078 case DP_TEST_LINK_TRAINING:
4079 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4080 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4081 response = intel_dp_autotest_link_training(intel_dp);
4082 break;
4083 case DP_TEST_LINK_VIDEO_PATTERN:
4084 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4085 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4086 response = intel_dp_autotest_video_pattern(intel_dp);
4087 break;
4088 case DP_TEST_LINK_EDID_READ:
4089 DRM_DEBUG_KMS("EDID test requested\n");
4090 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4091 response = intel_dp_autotest_edid(intel_dp);
4092 break;
4093 case DP_TEST_LINK_PHY_TEST_PATTERN:
4094 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4095 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4096 response = intel_dp_autotest_phy_pattern(intel_dp);
4097 break;
4098 default:
4099 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4100 break;
4101 }
4102
4103update_status:
4104 status = drm_dp_dpcd_write(&intel_dp->aux,
4105 DP_TEST_RESPONSE,
4106 &response, 1);
4107 if (status <= 0)
4108 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4109}
4110
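/*
 * Added note (not in the original source): service an MST short pulse.
 * Read the ESI vector, retrain the link if channel EQ is no longer ok,
 * and let the topology manager handle and ack any pending events. Falls
 * back to disabling MST if the ESI read fails.
 */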
0e32b39c
DA
4111static int
4112intel_dp_check_mst_status(struct intel_dp *intel_dp)
4113{
4114 bool bret;
4115
4116 if (intel_dp->is_mst) {
4117 u8 esi[16] = { 0 };
4118 int ret = 0;
4119 int retry;
4120 bool handled;
4121 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4122go_again:
4123 if (bret == true) {
4124
4125 /* check link status - esi[10] = 0x200c */
90a6b7b0 4126 if (intel_dp->active_mst_links &&
901c2daf 4127 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4128 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4129 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4130 intel_dp_stop_link_train(intel_dp);
4131 }
4132
6f34cc39 4133 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4134 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4135
4136 if (handled) {
4137 for (retry = 0; retry < 3; retry++) {
4138 int wret;
4139 wret = drm_dp_dpcd_write(&intel_dp->aux,
4140 DP_SINK_COUNT_ESI+1,
4141 &esi[1], 3);
4142 if (wret == 3) {
4143 break;
4144 }
4145 }
4146
4147 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4148 if (bret == true) {
6f34cc39 4149 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4150 goto go_again;
4151 }
4152 } else
4153 ret = 0;
4154
4155 return ret;
4156 } else {
4157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4158 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4159 intel_dp->is_mst = false;
4160 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4161 /* send a hotplug event */
4162 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4163 }
4164 }
4165 return -EINVAL;
4166}
4167
a4fc5ed6
KP
4168/*
4169 * According to DP spec
4170 * 5.1.2:
4171 * 1. Read DPCD
4172 * 2. Configure link according to Receiver Capabilities
4173 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4174 * 4. Check link status on receipt of hot-plug interrupt
4175 */
a5146200 4176static void
ea5b213a 4177intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4178{
5b215bcf 4179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4180 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4181 u8 sink_irq_vector;
93f62dad 4182 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4183
5b215bcf
DA
4184 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4185
e02f9a06 4186 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4187 return;
4188
1a125d8a
ID
4189 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4190 return;
4191
92fd8fd1 4192 /* Try to read receiver status if the link appears to be up */
93f62dad 4193 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4194 return;
4195 }
4196
92fd8fd1 4197 /* Now read the DPCD to see if it's actually running */
26d61aad 4198 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4199 return;
4200 }
4201
a60f0e38
JB
4202 /* Try to read the source of the interrupt */
4203 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4204 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4205 /* Clear interrupt source */
9d1a1031
JN
4206 drm_dp_dpcd_writeb(&intel_dp->aux,
4207 DP_DEVICE_SERVICE_IRQ_VECTOR,
4208 sink_irq_vector);
a60f0e38
JB
4209
4210 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4211 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4212 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4213 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4214 }
4215
901c2daf 4216 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4217 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4218 intel_encoder->base.name);
33a34e4e 4219 intel_dp_start_link_train(intel_dp);
3ab9c637 4220 intel_dp_stop_link_train(intel_dp);
33a34e4e 4221 }
a4fc5ed6 4222}
a4fc5ed6 4223
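/*
 * Added note (not in the original source): work out the connector status
 * from the DPCD. Native sinks are simply connected, HPD-aware branch
 * devices are judged by their SINK_COUNT, and for the rest we gently poke
 * DDC before giving up.
 */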
caf9ab24 4224/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4225static enum drm_connector_status
26d61aad 4226intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4227{
caf9ab24 4228 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4229 uint8_t type;
4230
4231 if (!intel_dp_get_dpcd(intel_dp))
4232 return connector_status_disconnected;
4233
4234 /* if there's no downstream port, we're done */
4235 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4236 return connector_status_connected;
caf9ab24
AJ
4237
4238 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4239 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4240 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4241 uint8_t reg;
9d1a1031
JN
4242
4243 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4244 &reg, 1) < 0)
caf9ab24 4245 return connector_status_unknown;
9d1a1031 4246
23235177
AJ
4247 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4248 : connector_status_disconnected;
caf9ab24
AJ
4249 }
4250
4251 /* If no HPD, poke DDC gently */
0b99836f 4252 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4253 return connector_status_connected;
caf9ab24
AJ
4254
4255 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4256 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4257 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4258 if (type == DP_DS_PORT_TYPE_VGA ||
4259 type == DP_DS_PORT_TYPE_NON_EDID)
4260 return connector_status_unknown;
4261 } else {
4262 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4263 DP_DWN_STRM_PORT_TYPE_MASK;
4264 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4265 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4266 return connector_status_unknown;
4267 }
caf9ab24
AJ
4268
4269 /* Anything else is out of spec, warn and ignore */
4270 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4271 return connector_status_disconnected;
71ba9000
AJ
4272}
4273
d410b56d
CW
4274static enum drm_connector_status
4275edp_detect(struct intel_dp *intel_dp)
4276{
4277 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4278 enum drm_connector_status status;
4279
4280 status = intel_panel_detect(dev);
4281 if (status == connector_status_unknown)
4282 status = connector_status_connected;
4283
4284 return status;
4285}
4286
b93433cc
JN
4287static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4288 struct intel_digital_port *port)
5eb08b69 4289{
b93433cc 4290 u32 bit;
01cb9ea6 4291
0df53b77
JN
4292 switch (port->port) {
4293 case PORT_A:
4294 return true;
4295 case PORT_B:
4296 bit = SDE_PORTB_HOTPLUG;
4297 break;
4298 case PORT_C:
4299 bit = SDE_PORTC_HOTPLUG;
4300 break;
4301 case PORT_D:
4302 bit = SDE_PORTD_HOTPLUG;
4303 break;
4304 default:
4305 MISSING_CASE(port->port);
4306 return false;
4307 }
4308
4309 return I915_READ(SDEISR) & bit;
4310}
4311
4312static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4313 struct intel_digital_port *port)
4314{
4315 u32 bit;
4316
4317 switch (port->port) {
4318 case PORT_A:
4319 return true;
4320 case PORT_B:
4321 bit = SDE_PORTB_HOTPLUG_CPT;
4322 break;
4323 case PORT_C:
4324 bit = SDE_PORTC_HOTPLUG_CPT;
4325 break;
4326 case PORT_D:
4327 bit = SDE_PORTD_HOTPLUG_CPT;
4328 break;
a78695d3
JN
4329 case PORT_E:
4330 bit = SDE_PORTE_HOTPLUG_SPT;
4331 break;
0df53b77
JN
4332 default:
4333 MISSING_CASE(port->port);
4334 return false;
b93433cc 4335 }
1b469639 4336
b93433cc 4337 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4338}
4339
7e66bcf2 4340static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4341 struct intel_digital_port *port)
a4fc5ed6 4342{
9642c81c 4343 u32 bit;
5eb08b69 4344
9642c81c
JN
4345 switch (port->port) {
4346 case PORT_B:
4347 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4348 break;
4349 case PORT_C:
4350 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4351 break;
4352 case PORT_D:
4353 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4354 break;
4355 default:
4356 MISSING_CASE(port->port);
4357 return false;
4358 }
4359
4360 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4361}
4362
4363static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4364 struct intel_digital_port *port)
4365{
4366 u32 bit;
4367
4368 switch (port->port) {
4369 case PORT_B:
4370 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4371 break;
4372 case PORT_C:
4373 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4374 break;
4375 case PORT_D:
4376 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4377 break;
4378 default:
4379 MISSING_CASE(port->port);
4380 return false;
a4fc5ed6
KP
4381 }
4382
1d245987 4383 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4384}
4385
e464bfde 4386static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4387 struct intel_digital_port *intel_dig_port)
e464bfde 4388{
e2ec35a5
SJ
4389 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4390 enum port port;
e464bfde
JN
4391 u32 bit;
4392
e2ec35a5
SJ
4393 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4394 switch (port) {
e464bfde
JN
4395 case PORT_A:
4396 bit = BXT_DE_PORT_HP_DDIA;
4397 break;
4398 case PORT_B:
4399 bit = BXT_DE_PORT_HP_DDIB;
4400 break;
4401 case PORT_C:
4402 bit = BXT_DE_PORT_HP_DDIC;
4403 break;
4404 default:
e2ec35a5 4405 MISSING_CASE(port);
e464bfde
JN
4406 return false;
4407 }
4408
4409 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4410}
4411
7e66bcf2
JN
4412/**
4413 * intel_digital_port_connected - is the specified port connected?
4414 * @dev_priv: i915 private structure
4415 * @port: the port to test
4416 *
4417 * Return %true if @port is connected, %false otherwise.
4418 */
237ed86c 4419bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4420 struct intel_digital_port *port)
4421{
0df53b77 4422 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4423 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4424 if (HAS_PCH_SPLIT(dev_priv))
4425 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4426 else if (IS_BROXTON(dev_priv))
4427 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4428 else if (IS_VALLEYVIEW(dev_priv))
4429 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4430 else
4431 return g4x_digital_port_connected(dev_priv, port);
4432}
4433
b93433cc
JN
4434static enum drm_connector_status
4435ironlake_dp_detect(struct intel_dp *intel_dp)
4436{
4437 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4438 struct drm_i915_private *dev_priv = dev->dev_private;
4439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4440
7e66bcf2 4441 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4442 return connector_status_disconnected;
4443
4444 return intel_dp_detect_dpcd(intel_dp);
4445}
4446
2a592bec
DA
4447static enum drm_connector_status
4448g4x_dp_detect(struct intel_dp *intel_dp)
4449{
4450 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4451 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4452
4453 /* Can't disconnect eDP, but you can close the lid... */
4454 if (is_edp(intel_dp)) {
4455 enum drm_connector_status status;
4456
4457 status = intel_panel_detect(dev);
4458 if (status == connector_status_unknown)
4459 status = connector_status_connected;
4460 return status;
4461 }
4462
7e66bcf2 4463 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4464 return connector_status_disconnected;
4465
26d61aad 4466 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4467}
4468
8c241fef 4469static struct edid *
beb60608 4470intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4471{
beb60608 4472 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4473
9cd300e0
JN
4474 /* use cached edid if we have one */
4475 if (intel_connector->edid) {
9cd300e0
JN
4476 /* invalid edid */
4477 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4478 return NULL;
4479
55e9edeb 4480 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4481 } else
4482 return drm_get_edid(&intel_connector->base,
4483 &intel_dp->aux.ddc);
4484}
8c241fef 4485
beb60608
CW
4486static void
4487intel_dp_set_edid(struct intel_dp *intel_dp)
4488{
4489 struct intel_connector *intel_connector = intel_dp->attached_connector;
4490 struct edid *edid;
8c241fef 4491
beb60608
CW
4492 edid = intel_dp_get_edid(intel_dp);
4493 intel_connector->detect_edid = edid;
4494
4495 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4496 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4497 else
4498 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4499}
4500
beb60608
CW
4501static void
4502intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4503{
beb60608 4504 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4505
beb60608
CW
4506 kfree(intel_connector->detect_edid);
4507 intel_connector->detect_edid = NULL;
9cd300e0 4508
beb60608
CW
4509 intel_dp->has_audio = false;
4510}
d6f24d0f 4511
beb60608
CW
4512static enum intel_display_power_domain
4513intel_dp_power_get(struct intel_dp *dp)
4514{
4515 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4516 enum intel_display_power_domain power_domain;
4517
4518 power_domain = intel_display_port_power_domain(encoder);
4519 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4520
4521 return power_domain;
4522}
d6f24d0f 4523
beb60608
CW
4524static void
4525intel_dp_power_put(struct intel_dp *dp,
4526 enum intel_display_power_domain power_domain)
4527{
4528 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4529 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4530}
4531
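/*
 * Added note (not in the original source): full connector detection.
 * Check the port/panel state, re-read the DPCD, probe OUI and MST
 * support, cache the EDID and service any pending automated test
 * request, all while holding a display power reference.
 */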
a9756bb5
ZW
4532static enum drm_connector_status
4533intel_dp_detect(struct drm_connector *connector, bool force)
4534{
4535 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4536 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4537 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4538 struct drm_device *dev = connector->dev;
a9756bb5 4539 enum drm_connector_status status;
671dedd2 4540 enum intel_display_power_domain power_domain;
0e32b39c 4541 bool ret;
09b1eb13 4542 u8 sink_irq_vector;
a9756bb5 4543
164c8598 4544 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4545 connector->base.id, connector->name);
beb60608 4546 intel_dp_unset_edid(intel_dp);
164c8598 4547
0e32b39c
DA
4548 if (intel_dp->is_mst) {
4549 /* MST devices are disconnected from a monitor POV */
4550 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4551 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4552 return connector_status_disconnected;
0e32b39c
DA
4553 }
4554
beb60608 4555 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4556
d410b56d
CW
4557 /* Can't disconnect eDP, but you can close the lid... */
4558 if (is_edp(intel_dp))
4559 status = edp_detect(intel_dp);
4560 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4561 status = ironlake_dp_detect(intel_dp);
4562 else
4563 status = g4x_dp_detect(intel_dp);
4564 if (status != connector_status_connected)
c8c8fb33 4565 goto out;
a9756bb5 4566
0d198328
AJ
4567 intel_dp_probe_oui(intel_dp);
4568
0e32b39c
DA
4569 ret = intel_dp_probe_mst(intel_dp);
4570 if (ret) {
4571 /* if we are in MST mode then this connector
4572 won't appear connected or have anything with EDID on it */
4573 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4574 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4575 status = connector_status_disconnected;
4576 goto out;
4577 }
4578
beb60608 4579 intel_dp_set_edid(intel_dp);
a9756bb5 4580
d63885da
PZ
4581 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4582 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4583 status = connector_status_connected;
4584
09b1eb13
TP
4585 /* Try to read the source of the interrupt */
4586 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4587 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4588 /* Clear interrupt source */
4589 drm_dp_dpcd_writeb(&intel_dp->aux,
4590 DP_DEVICE_SERVICE_IRQ_VECTOR,
4591 sink_irq_vector);
4592
4593 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4594 intel_dp_handle_test_request(intel_dp);
4595 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4596 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4597 }
4598
c8c8fb33 4599out:
beb60608 4600 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4601 return status;
a4fc5ed6
KP
4602}
4603
beb60608
CW
4604static void
4605intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4606{
df0e9248 4607 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4608 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4609 enum intel_display_power_domain power_domain;
a4fc5ed6 4610
beb60608
CW
4611 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4612 connector->base.id, connector->name);
4613 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4614
beb60608
CW
4615 if (connector->status != connector_status_connected)
4616 return;
671dedd2 4617
beb60608
CW
4618 power_domain = intel_dp_power_get(intel_dp);
4619
4620 intel_dp_set_edid(intel_dp);
4621
4622 intel_dp_power_put(intel_dp, power_domain);
4623
4624 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4625 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4626}
4627
4628static int intel_dp_get_modes(struct drm_connector *connector)
4629{
4630 struct intel_connector *intel_connector = to_intel_connector(connector);
4631 struct edid *edid;
4632
4633 edid = intel_connector->detect_edid;
4634 if (edid) {
4635 int ret = intel_connector_update_modes(connector, edid);
4636 if (ret)
4637 return ret;
4638 }
32f9d658 4639
f8779fda 4640 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4641 if (is_edp(intel_attached_dp(connector)) &&
4642 intel_connector->panel.fixed_mode) {
f8779fda 4643 struct drm_display_mode *mode;
beb60608
CW
4644
4645 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4646 intel_connector->panel.fixed_mode);
f8779fda 4647 if (mode) {
32f9d658
ZW
4648 drm_mode_probed_add(connector, mode);
4649 return 1;
4650 }
4651 }
beb60608 4652
32f9d658 4653 return 0;
a4fc5ed6
KP
4654}
4655
1aad7ac0
CW
4656static bool
4657intel_dp_detect_audio(struct drm_connector *connector)
4658{
1aad7ac0 4659 bool has_audio = false;
beb60608 4660 struct edid *edid;
1aad7ac0 4661
beb60608
CW
4662 edid = to_intel_connector(connector)->detect_edid;
4663 if (edid)
1aad7ac0 4664 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4665
1aad7ac0
CW
4666 return has_audio;
4667}
4668
f684960e
CW
4669static int
4670intel_dp_set_property(struct drm_connector *connector,
4671 struct drm_property *property,
4672 uint64_t val)
4673{
e953fd7b 4674 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4675 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4676 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4677 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4678 int ret;
4679
662595df 4680 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4681 if (ret)
4682 return ret;
4683
3f43c48d 4684 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4685 int i = val;
4686 bool has_audio;
4687
4688 if (i == intel_dp->force_audio)
f684960e
CW
4689 return 0;
4690
1aad7ac0 4691 intel_dp->force_audio = i;
f684960e 4692
c3e5f67b 4693 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4694 has_audio = intel_dp_detect_audio(connector);
4695 else
c3e5f67b 4696 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4697
4698 if (has_audio == intel_dp->has_audio)
f684960e
CW
4699 return 0;
4700
1aad7ac0 4701 intel_dp->has_audio = has_audio;
f684960e
CW
4702 goto done;
4703 }
4704
e953fd7b 4705 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4706 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4707 bool old_range = intel_dp->limited_color_range;
ae4edb80 4708
55bc60db
VS
4709 switch (val) {
4710 case INTEL_BROADCAST_RGB_AUTO:
4711 intel_dp->color_range_auto = true;
4712 break;
4713 case INTEL_BROADCAST_RGB_FULL:
4714 intel_dp->color_range_auto = false;
0f2a2a75 4715 intel_dp->limited_color_range = false;
55bc60db
VS
4716 break;
4717 case INTEL_BROADCAST_RGB_LIMITED:
4718 intel_dp->color_range_auto = false;
0f2a2a75 4719 intel_dp->limited_color_range = true;
55bc60db
VS
4720 break;
4721 default:
4722 return -EINVAL;
4723 }
ae4edb80
DV
4724
4725 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4726 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4727 return 0;
4728
e953fd7b
CW
4729 goto done;
4730 }
4731
53b41837
YN
4732 if (is_edp(intel_dp) &&
4733 property == connector->dev->mode_config.scaling_mode_property) {
4734 if (val == DRM_MODE_SCALE_NONE) {
4735 DRM_DEBUG_KMS("no scaling not supported\n");
4736 return -EINVAL;
4737 }
4738
4739 if (intel_connector->panel.fitting_mode == val) {
4740 /* the eDP scaling property is not changed */
4741 return 0;
4742 }
4743 intel_connector->panel.fitting_mode = val;
4744
4745 goto done;
4746 }
4747
f684960e
CW
4748 return -EINVAL;
4749
4750done:
c0c36b94
CW
4751 if (intel_encoder->base.crtc)
4752 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4753
4754 return 0;
4755}
4756
a4fc5ed6 4757static void
73845adf 4758intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4759{
1d508706 4760 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4761
10e972d3 4762 kfree(intel_connector->detect_edid);
beb60608 4763
9cd300e0
JN
4764 if (!IS_ERR_OR_NULL(intel_connector->edid))
4765 kfree(intel_connector->edid);
4766
acd8db10
PZ
4767 /* Can't call is_edp() since the encoder may have been destroyed
4768 * already. */
4769 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4770 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4771
a4fc5ed6 4772 drm_connector_cleanup(connector);
55f78c43 4773 kfree(connector);
a4fc5ed6
KP
4774}
4775
00c09d70 4776void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4777{
da63a9f2
PZ
4778 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4779 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4780
4f71d0cb 4781 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4782 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4783 if (is_edp(intel_dp)) {
4784 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4785 /*
4786		 * vdd might still be enabled due to the delayed vdd off.
4787 * Make sure vdd is actually turned off here.
4788 */
773538e8 4789 pps_lock(intel_dp);
4be73780 4790 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4791 pps_unlock(intel_dp);
4792
01527b31
CT
4793 if (intel_dp->edp_notifier.notifier_call) {
4794 unregister_reboot_notifier(&intel_dp->edp_notifier);
4795 intel_dp->edp_notifier.notifier_call = NULL;
4796 }
bd943159 4797 }
c8bd0e49 4798 drm_encoder_cleanup(encoder);
da63a9f2 4799 kfree(intel_dig_port);
24d05927
DV
4800}
4801
07f9cd0b
ID
4802static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4803{
4804 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4805
4806 if (!is_edp(intel_dp))
4807 return;
4808
951468f3
VS
4809 /*
4810 * vdd might still be enabled due to the delayed vdd off.
4811 * Make sure vdd is actually turned off here.
4812 */
afa4e53a 4813 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4814 pps_lock(intel_dp);
07f9cd0b 4815 edp_panel_vdd_off_sync(intel_dp);
773538e8 4816 pps_unlock(intel_dp);
07f9cd0b
ID
4817}
4818
49e6bc51
VS
4819static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4820{
4821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4822 struct drm_device *dev = intel_dig_port->base.base.dev;
4823 struct drm_i915_private *dev_priv = dev->dev_private;
4824 enum intel_display_power_domain power_domain;
4825
4826 lockdep_assert_held(&dev_priv->pps_mutex);
4827
4828 if (!edp_have_panel_vdd(intel_dp))
4829 return;
4830
4831 /*
4832 * The VDD bit needs a power domain reference, so if the bit is
4833 * already enabled when we boot or resume, grab this reference and
4834 * schedule a vdd off, so we don't hold on to the reference
4835 * indefinitely.
4836 */
4837 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4838 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4839 intel_display_power_get(dev_priv, power_domain);
4840
4841 edp_panel_vdd_schedule_off(intel_dp);
4842}
4843
6d93c0c4
ID
4844static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4845{
49e6bc51
VS
4846 struct intel_dp *intel_dp;
4847
4848 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4849 return;
4850
4851 intel_dp = enc_to_intel_dp(encoder);
4852
4853 pps_lock(intel_dp);
4854
4855 /*
4856 * Read out the current power sequencer assignment,
4857 * in case the BIOS did something with it.
4858 */
4859 if (IS_VALLEYVIEW(encoder->dev))
4860 vlv_initial_power_sequencer_setup(intel_dp);
4861
4862 intel_edp_panel_vdd_sanitize(intel_dp);
4863
4864 pps_unlock(intel_dp);
6d93c0c4
ID
4865}
4866
a4fc5ed6 4867static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4868 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4869 .detect = intel_dp_detect,
beb60608 4870 .force = intel_dp_force,
a4fc5ed6 4871 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4872 .set_property = intel_dp_set_property,
2545e4a6 4873 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4874 .destroy = intel_dp_connector_destroy,
c6f95f27 4875 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4876 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4877};
4878
4879static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4880 .get_modes = intel_dp_get_modes,
4881 .mode_valid = intel_dp_mode_valid,
df0e9248 4882 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4883};
4884
a4fc5ed6 4885static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4886 .reset = intel_dp_encoder_reset,
24d05927 4887 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4888};
4889
b2c5c181 4890enum irqreturn
13cf5504
DA
4891intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4892{
4893 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4894 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4895 struct drm_device *dev = intel_dig_port->base.base.dev;
4896 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4897 enum intel_display_power_domain power_domain;
b2c5c181 4898 enum irqreturn ret = IRQ_NONE;
1c767b33 4899
0e32b39c
DA
4900 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4901 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4902
7a7f84cc
VS
4903 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4904 /*
4905 * vdd off can generate a long pulse on eDP which
4906 * would require vdd on to handle it, and thus we
4907 * would end up in an endless cycle of
4908 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4909 */
4910 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4911 port_name(intel_dig_port->port));
a8b3d52f 4912 return IRQ_HANDLED;
7a7f84cc
VS
4913 }
4914
26fbb774
VS
4915 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4916 port_name(intel_dig_port->port),
0e32b39c 4917 long_hpd ? "long" : "short");
13cf5504 4918
1c767b33
ID
4919 power_domain = intel_display_port_power_domain(intel_encoder);
4920 intel_display_power_get(dev_priv, power_domain);
4921
0e32b39c 4922 if (long_hpd) {
5fa836a9
MK
4923 /* indicate that we need to restart link training */
4924 intel_dp->train_set_valid = false;
2a592bec 4925
7e66bcf2
JN
4926 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4927 goto mst_fail;
0e32b39c
DA
4928
4929 if (!intel_dp_get_dpcd(intel_dp)) {
4930 goto mst_fail;
4931 }
4932
4933 intel_dp_probe_oui(intel_dp);
4934
d14e7b6d
VS
4935 if (!intel_dp_probe_mst(intel_dp)) {
4936 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4937 intel_dp_check_link_status(intel_dp);
4938 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 4939 goto mst_fail;
d14e7b6d 4940 }
0e32b39c
DA
4941 } else {
4942 if (intel_dp->is_mst) {
1c767b33 4943 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4944 goto mst_fail;
4945 }
4946
4947 if (!intel_dp->is_mst) {
5b215bcf 4948 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4949 intel_dp_check_link_status(intel_dp);
5b215bcf 4950 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4951 }
4952 }
b2c5c181
DV
4953
4954 ret = IRQ_HANDLED;
4955
1c767b33 4956 goto put_power;
0e32b39c
DA
4957mst_fail:
4958 /* if we were in MST mode and the device is not there, get out of MST mode */
4959 if (intel_dp->is_mst) {
4960 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4961 intel_dp->is_mst = false;
4962 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4963 }
1c767b33
ID
4964put_power:
4965 intel_display_power_put(dev_priv, power_domain);
4966
4967 return ret;
13cf5504
DA
4968}
4969
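/*
 * Summary of the pulse handling above: long pulses on eDP are ignored
 * (vdd off can itself trigger one, which would otherwise loop); long
 * pulses on DP re-read the DPCD, re-probe OUI/MST and, for SST sinks,
 * recheck link status; short pulses service MST interrupts or recheck
 * link status for SST sinks. Any MST failure drops the port back to SST.
 */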
e3421a18
ZW
4970/* Return which DP Port should be selected for Transcoder DP control */
4971int
0206e353 4972intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4973{
4974 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4975 struct intel_encoder *intel_encoder;
4976 struct intel_dp *intel_dp;
e3421a18 4977
fa90ecef
PZ
4978 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4979 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4980
fa90ecef
PZ
4981 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4982 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4983 return intel_dp->output_reg;
e3421a18 4984 }
ea5b213a 4985
e3421a18
ZW
4986 return -1;
4987}
4988
477ec328 4989/* check the VBT to see whether the eDP is on another port */
5d8a7752 4990bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4991{
4992 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4993 union child_device_config *p_child;
36e83a18 4994 int i;
5d8a7752 4995 static const short port_mapping[] = {
477ec328
RV
4996 [PORT_B] = DVO_PORT_DPB,
4997 [PORT_C] = DVO_PORT_DPC,
4998 [PORT_D] = DVO_PORT_DPD,
4999 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5000 };
36e83a18 5001
53ce81a7
VS
5002 /*
5003 * eDP is not supported on g4x, so bail out early just
5004 * for a bit of extra safety in case the VBT is bonkers.
5005 */
5006 if (INTEL_INFO(dev)->gen < 5)
5007 return false;
5008
3b32a35b
VS
5009 if (port == PORT_A)
5010 return true;
5011
41aa3448 5012 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5013 return false;
5014
41aa3448
RV
5015 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5016 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5017
5d8a7752 5018 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5019 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5020 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5021 return true;
5022 }
5023 return false;
5024}
5025
0e32b39c 5026void
f684960e
CW
5027intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5028{
53b41837
YN
5029 struct intel_connector *intel_connector = to_intel_connector(connector);
5030
3f43c48d 5031 intel_attach_force_audio_property(connector);
e953fd7b 5032 intel_attach_broadcast_rgb_property(connector);
55bc60db 5033 intel_dp->color_range_auto = true;
53b41837
YN
5034
5035 if (is_edp(intel_dp)) {
5036 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5037 drm_object_attach_property(
5038 &connector->base,
53b41837 5039 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5040 DRM_MODE_SCALE_ASPECT);
5041 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5042 }
f684960e
CW
5043}
5044
dada1a9f
ID
5045static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5046{
5047 intel_dp->last_power_cycle = jiffies;
5048 intel_dp->last_power_on = jiffies;
5049 intel_dp->last_backlight_off = jiffies;
5050}
5051
67a54566
DV
5052static void
5053intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5054 struct intel_dp *intel_dp)
67a54566
DV
5055{
5056 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5057 struct edp_power_seq cur, vbt, spec,
5058 *final = &intel_dp->pps_delays;
b0a08bec
VK
5059 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5060 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5061
e39b999a
VS
5062 lockdep_assert_held(&dev_priv->pps_mutex);
5063
81ddbc69
VS
5064 /* already initialized? */
5065 if (final->t11_t12 != 0)
5066 return;
5067
b0a08bec
VK
5068 if (IS_BROXTON(dev)) {
5069 /*
5070 * TODO: BXT has 2 sets of PPS registers.
5071 * Correct Register for Broxton need to be identified
5072 * using VBT. hardcoding for now
5073 */
5074 pp_ctrl_reg = BXT_PP_CONTROL(0);
5075 pp_on_reg = BXT_PP_ON_DELAYS(0);
5076 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5077 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5078 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5079 pp_on_reg = PCH_PP_ON_DELAYS;
5080 pp_off_reg = PCH_PP_OFF_DELAYS;
5081 pp_div_reg = PCH_PP_DIVISOR;
5082 } else {
bf13e81b
JN
5083 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5084
5085 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5086 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5087 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5088 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5089 }
67a54566
DV
5090
5091 /* Workaround: Need to write PP_CONTROL with the unlock key as
5092 * the very first thing. */
b0a08bec 5093 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5094
453c5420
JB
5095 pp_on = I915_READ(pp_on_reg);
5096 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5097 if (!IS_BROXTON(dev)) {
5098 I915_WRITE(pp_ctrl_reg, pp_ctl);
5099 pp_div = I915_READ(pp_div_reg);
5100 }
67a54566
DV
5101
5102 /* Pull timing values out of registers */
5103 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5104 PANEL_POWER_UP_DELAY_SHIFT;
5105
5106 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5107 PANEL_LIGHT_ON_DELAY_SHIFT;
5108
5109 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5110 PANEL_LIGHT_OFF_DELAY_SHIFT;
5111
5112 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5113 PANEL_POWER_DOWN_DELAY_SHIFT;
5114
b0a08bec
VK
5115 if (IS_BROXTON(dev)) {
5116 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5117 BXT_POWER_CYCLE_DELAY_SHIFT;
5118 if (tmp > 0)
5119 cur.t11_t12 = (tmp - 1) * 1000;
5120 else
5121 cur.t11_t12 = 0;
5122 } else {
5123 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5124 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5125 }
67a54566
DV
5126
5127 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5128 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5129
41aa3448 5130 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5131
5132 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5133 * our hw here, which are all in 100usec. */
5134 spec.t1_t3 = 210 * 10;
5135 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5136 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5137 spec.t10 = 500 * 10;
5138 /* This one is special and actually in units of 100ms, but zero
5139 * based in the hw (so we need to add 100 ms). But the sw vbt
5140 * table multiplies it by 1000 to make it in units of 100usec,
5141 * too. */
5142 spec.t11_t12 = (510 + 100) * 10;
5143
5144 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5145 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5146
5147 /* Use the max of the register settings and vbt. If both are
5148 * unset, fall back to the spec limits. */
36b5f425 5149#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5150 spec.field : \
5151 max(cur.field, vbt.field))
5152 assign_final(t1_t3);
5153 assign_final(t8);
5154 assign_final(t9);
5155 assign_final(t10);
5156 assign_final(t11_t12);
5157#undef assign_final
5158
36b5f425 5159#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5160 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5161 intel_dp->backlight_on_delay = get_delay(t8);
5162 intel_dp->backlight_off_delay = get_delay(t9);
5163 intel_dp->panel_power_down_delay = get_delay(t10);
5164 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5165#undef get_delay
5166
f30d26e4
JN
5167 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5168 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5169 intel_dp->panel_power_cycle_delay);
5170
5171 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5172 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5173}
5174
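/*
 * A worked example of the delay selection above (values assumed for
 * illustration): with the power sequencer registers still zeroed and a
 * VBT t1_t3 of 0, assign_final() falls back to the spec limit of 2100
 * (in 100 usec units), and get_delay() converts that to a 210 ms
 * panel_power_up_delay. A non-zero register or VBT value always wins,
 * whichever of the two is larger.
 */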
5175static void
5176intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5177 struct intel_dp *intel_dp)
f30d26e4
JN
5178{
5179 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5180 u32 pp_on, pp_off, pp_div, port_sel = 0;
5181 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5182 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5183 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5184 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5185
e39b999a 5186 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5187
b0a08bec
VK
5188 if (IS_BROXTON(dev)) {
5189 /*
5190 * TODO: BXT has 2 sets of PPS registers.
5191 * The correct register for Broxton needs to be identified
5192 * using the VBT; hardcoding for now.
5193 */
5194 pp_ctrl_reg = BXT_PP_CONTROL(0);
5195 pp_on_reg = BXT_PP_ON_DELAYS(0);
5196 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5197
5198 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5199 pp_on_reg = PCH_PP_ON_DELAYS;
5200 pp_off_reg = PCH_PP_OFF_DELAYS;
5201 pp_div_reg = PCH_PP_DIVISOR;
5202 } else {
bf13e81b
JN
5203 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5204
5205 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5206 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5207 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5208 }
5209
b2f19d1a
PZ
5210 /*
5211 * And finally store the new values in the power sequencer. The
5212 * backlight delays are set to 1 because we do manual waits on them. For
5213 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5214 * we'll end up waiting for the backlight off delay twice: once when we
5215 * do the manual sleep, and once when we disable the panel and wait for
5216 * the PP_STATUS bit to become zero.
5217 */
f30d26e4 5218 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5219 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5220 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5221 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5222 /* Compute the divisor for the pp clock, simply match the Bspec
5223 * formula. */
b0a08bec
VK
5224 if (IS_BROXTON(dev)) {
5225 pp_div = I915_READ(pp_ctrl_reg);
5226 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5227 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5228 << BXT_POWER_CYCLE_DELAY_SHIFT);
5229 } else {
5230 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5231 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5232 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5233 }
67a54566
DV
5234
5235 /* Haswell doesn't have any port selection bits for the panel
5236 * power sequencer any more. */
bc7d38a4 5237 if (IS_VALLEYVIEW(dev)) {
ad933b56 5238 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5239 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5240 if (port == PORT_A)
a24c144c 5241 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5242 else
a24c144c 5243 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5244 }
5245
453c5420
JB
5246 pp_on |= port_sel;
5247
5248 I915_WRITE(pp_on_reg, pp_on);
5249 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5250 if (IS_BROXTON(dev))
5251 I915_WRITE(pp_ctrl_reg, pp_div);
5252 else
5253 I915_WRITE(pp_div_reg, pp_div);
67a54566 5254
67a54566 5255 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5256 I915_READ(pp_on_reg),
5257 I915_READ(pp_off_reg),
b0a08bec
VK
5258 IS_BROXTON(dev) ?
5259 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5260 I915_READ(pp_div_reg));
f684960e
CW
5261}
5262
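/*
 * Register layout written above, as implied by the shifts used: PP_ON
 * packs the power-up (t1_t3) and backlight-on delays, PP_OFF packs the
 * backlight-off and power-down (t10) delays, and the power-cycle delay
 * (t11_t12) lives next to the reference divider in PP_DIVISOR (or in
 * PP_CONTROL on Broxton, which has no divisor register here).
 */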
b33a2815
VK
5263/**
5264 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5265 * @dev: DRM device
5266 * @refresh_rate: RR to be programmed
5267 *
5268 * This function gets called when refresh rate (RR) has to be changed from
5269 * one frequency to another. Switches can be between high and low RR
5270 * supported by the panel or to any other RR based on media playback (in
5271 * this case, RR value needs to be passed from user space).
5272 *
5273 * The caller of this function needs to take a lock on dev_priv->drrs.
5274 */
96178eeb 5275static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5276{
5277 struct drm_i915_private *dev_priv = dev->dev_private;
5278 struct intel_encoder *encoder;
96178eeb
VK
5279 struct intel_digital_port *dig_port = NULL;
5280 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5281 struct intel_crtc_state *config = NULL;
439d7ac0 5282 struct intel_crtc *intel_crtc = NULL;
96178eeb 5283 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5284
5285 if (refresh_rate <= 0) {
5286 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5287 return;
5288 }
5289
96178eeb
VK
5290 if (intel_dp == NULL) {
5291 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5292 return;
5293 }
5294
1fcc9d1c 5295 /*
e4d59f6b
RV
5296 * FIXME: This needs proper synchronization with psr state for some
5297 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5298 */
439d7ac0 5299
96178eeb
VK
5300 dig_port = dp_to_dig_port(intel_dp);
5301 encoder = &dig_port->base;
723f9aab 5302 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5303
5304 if (!intel_crtc) {
5305 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5306 return;
5307 }
5308
6e3c9717 5309 config = intel_crtc->config;
439d7ac0 5310
96178eeb 5311 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5312 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5313 return;
5314 }
5315
96178eeb
VK
5316 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5317 refresh_rate)
439d7ac0
PB
5318 index = DRRS_LOW_RR;
5319
96178eeb 5320 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5321 DRM_DEBUG_KMS(
5322 "DRRS requested for previously set RR...ignoring\n");
5323 return;
5324 }
5325
5326 if (!intel_crtc->active) {
5327 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5328 return;
5329 }
5330
44395bfe 5331 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5332 switch (index) {
5333 case DRRS_HIGH_RR:
5334 intel_dp_set_m_n(intel_crtc, M1_N1);
5335 break;
5336 case DRRS_LOW_RR:
5337 intel_dp_set_m_n(intel_crtc, M2_N2);
5338 break;
5339 case DRRS_MAX_RR:
5340 default:
5341 DRM_ERROR("Unsupported refreshrate type\n");
5342 }
5343 } else if (INTEL_INFO(dev)->gen > 6) {
649636ef
VS
5344 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5345 u32 val;
a4c30b1d 5346
649636ef 5347 val = I915_READ(reg);
439d7ac0 5348 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5349 if (IS_VALLEYVIEW(dev))
5350 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5351 else
5352 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5353 } else {
6fa7aec1
VK
5354 if (IS_VALLEYVIEW(dev))
5355 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5356 else
5357 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5358 }
5359 I915_WRITE(reg, val);
5360 }
5361
4e9ac947
VK
5362 dev_priv->drrs.refresh_rate_type = index;
5363
5364 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5365}
5366
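/*
 * Note on the two hardware paths above: gen8+ parts other than CHV
 * switch the refresh rate by selecting between the M1/N1 and M2/N2 link
 * M/N values, while the remaining gen7+ parts (including VLV/CHV) toggle
 * the eDP RR mode switch bit in PIPECONF instead.
 */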
b33a2815
VK
5367/**
5368 * intel_edp_drrs_enable - init drrs struct if supported
5369 * @intel_dp: DP struct
5370 *
5371 * Initializes frontbuffer_bits and drrs.dp
5372 */
c395578e
VK
5373void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5374{
5375 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5376 struct drm_i915_private *dev_priv = dev->dev_private;
5377 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5378 struct drm_crtc *crtc = dig_port->base.base.crtc;
5379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5380
5381 if (!intel_crtc->config->has_drrs) {
5382 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5383 return;
5384 }
5385
5386 mutex_lock(&dev_priv->drrs.mutex);
5387 if (WARN_ON(dev_priv->drrs.dp)) {
5388 DRM_ERROR("DRRS already enabled\n");
5389 goto unlock;
5390 }
5391
5392 dev_priv->drrs.busy_frontbuffer_bits = 0;
5393
5394 dev_priv->drrs.dp = intel_dp;
5395
5396unlock:
5397 mutex_unlock(&dev_priv->drrs.mutex);
5398}
5399
b33a2815
VK
5400/**
5401 * intel_edp_drrs_disable - Disable DRRS
5402 * @intel_dp: DP struct
5403 *
5404 */
c395578e
VK
5405void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5406{
5407 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5408 struct drm_i915_private *dev_priv = dev->dev_private;
5409 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5410 struct drm_crtc *crtc = dig_port->base.base.crtc;
5411 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5412
5413 if (!intel_crtc->config->has_drrs)
5414 return;
5415
5416 mutex_lock(&dev_priv->drrs.mutex);
5417 if (!dev_priv->drrs.dp) {
5418 mutex_unlock(&dev_priv->drrs.mutex);
5419 return;
5420 }
5421
5422 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5423 intel_dp_set_drrs_state(dev_priv->dev,
5424 intel_dp->attached_connector->panel.
5425 fixed_mode->vrefresh);
5426
5427 dev_priv->drrs.dp = NULL;
5428 mutex_unlock(&dev_priv->drrs.mutex);
5429
5430 cancel_delayed_work_sync(&dev_priv->drrs.work);
5431}
5432
4e9ac947
VK
5433static void intel_edp_drrs_downclock_work(struct work_struct *work)
5434{
5435 struct drm_i915_private *dev_priv =
5436 container_of(work, typeof(*dev_priv), drrs.work.work);
5437 struct intel_dp *intel_dp;
5438
5439 mutex_lock(&dev_priv->drrs.mutex);
5440
5441 intel_dp = dev_priv->drrs.dp;
5442
5443 if (!intel_dp)
5444 goto unlock;
5445
439d7ac0 5446 /*
4e9ac947
VK
5447 * The delayed work can race with an invalidate hence we need to
5448 * recheck.
439d7ac0
PB
5449 */
5450
4e9ac947
VK
5451 if (dev_priv->drrs.busy_frontbuffer_bits)
5452 goto unlock;
439d7ac0 5453
4e9ac947
VK
5454 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5455 intel_dp_set_drrs_state(dev_priv->dev,
5456 intel_dp->attached_connector->panel.
5457 downclock_mode->vrefresh);
439d7ac0 5458
4e9ac947 5459unlock:
4e9ac947 5460 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5461}
5462
b33a2815 5463/**
0ddfd203 5464 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5465 * @dev: DRM device
5466 * @frontbuffer_bits: frontbuffer plane tracking bits
5467 *
0ddfd203
R
5468 * This function gets called every time rendering on the given planes starts.
5469 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5470 *
5471 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5472 */
a93fad0f
VK
5473void intel_edp_drrs_invalidate(struct drm_device *dev,
5474 unsigned frontbuffer_bits)
5475{
5476 struct drm_i915_private *dev_priv = dev->dev_private;
5477 struct drm_crtc *crtc;
5478 enum pipe pipe;
5479
9da7d693 5480 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5481 return;
5482
88f933a8 5483 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5484
a93fad0f 5485 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5486 if (!dev_priv->drrs.dp) {
5487 mutex_unlock(&dev_priv->drrs.mutex);
5488 return;
5489 }
5490
a93fad0f
VK
5491 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5492 pipe = to_intel_crtc(crtc)->pipe;
5493
c1d038c6
DV
5494 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5495 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5496
0ddfd203 5497 /* invalidate means busy screen hence upclock */
c1d038c6 5498 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5499 intel_dp_set_drrs_state(dev_priv->dev,
5500 dev_priv->drrs.dp->attached_connector->panel.
5501 fixed_mode->vrefresh);
a93fad0f 5502
a93fad0f
VK
5503 mutex_unlock(&dev_priv->drrs.mutex);
5504}
5505
b33a2815 5506/**
0ddfd203 5507 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5508 * @dev: DRM device
5509 * @frontbuffer_bits: frontbuffer plane tracking bits
5510 *
0ddfd203
R
5511 * This function gets called every time rendering on the given planes has
5512 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5513 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5514 * if no other planes are dirty.
b33a2815
VK
5515 *
5516 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5517 */
a93fad0f
VK
5518void intel_edp_drrs_flush(struct drm_device *dev,
5519 unsigned frontbuffer_bits)
5520{
5521 struct drm_i915_private *dev_priv = dev->dev_private;
5522 struct drm_crtc *crtc;
5523 enum pipe pipe;
5524
9da7d693 5525 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5526 return;
5527
88f933a8 5528 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5529
a93fad0f 5530 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5531 if (!dev_priv->drrs.dp) {
5532 mutex_unlock(&dev_priv->drrs.mutex);
5533 return;
5534 }
5535
a93fad0f
VK
5536 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5537 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5538
5539 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5540 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5541
0ddfd203 5542 /* flush means busy screen hence upclock */
c1d038c6 5543 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5544 intel_dp_set_drrs_state(dev_priv->dev,
5545 dev_priv->drrs.dp->attached_connector->panel.
5546 fixed_mode->vrefresh);
5547
5548 /*
5549 * flush also means no more activity hence schedule downclock, if all
5550 * other fbs are quiescent too
5551 */
5552 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5553 schedule_delayed_work(&dev_priv->drrs.work,
5554 msecs_to_jiffies(1000));
5555 mutex_unlock(&dev_priv->drrs.mutex);
5556}
5557
b33a2815
VK
5558/**
5559 * DOC: Display Refresh Rate Switching (DRRS)
5560 *
5561 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5562 * which enables switching between low and high refresh rates,
5563 * dynamically, based on the usage scenario. This feature is applicable
5564 * for internal panels.
5565 *
5566 * Indication that the panel supports DRRS is given by the panel EDID, which
5567 * would list multiple refresh rates for one resolution.
5568 *
5569 * DRRS is of 2 types - static and seamless.
5570 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5571 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5572 * Seamless DRRS involves changing RR without any visual effect to the user
5573 * and can be used during normal system usage. This is done by programming
5574 * certain registers.
5575 *
5576 * Support for static/seamless DRRS may be indicated in the VBT based on
5577 * inputs from the panel spec.
5578 *
5579 * DRRS saves power by switching to low RR based on usage scenarios.
5580 *
5581 * eDP DRRS:-
5582 * The implementation is based on frontbuffer tracking implementation.
5583 * When there is a disturbance on the screen triggered by user activity or a
5584 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5585 * When there is no movement on screen, after a timeout of 1 second, a switch
5586 * to low RR is made.
5587 * For integration with frontbuffer tracking code,
5588 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5589 *
5590 * DRRS can be further extended to support other internal panels and also
5591 * the scenario of video playback wherein RR is set based on the rate
5592 * requested by userspace.
5593 */
5594
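/*
 * A minimal usage sketch of the frontbuffer integration described above;
 * the call sites shown are illustrative, not the actual tracking code:
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... draw to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * where frontbuffer_bits identifies the dirtied planes. invalidate()
 * upclocks immediately; flush() upclocks and re-arms the 1 second idle
 * timer that eventually drops back to the low refresh rate.
 */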
5595/**
5596 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5597 * @intel_connector: eDP connector
5598 * @fixed_mode: preferred mode of panel
5599 *
5600 * This function is called only once at driver load to initialize basic
5601 * DRRS state.
5602 *
5603 * Returns:
5604 * Downclock mode if panel supports it, else return NULL.
5605 * DRRS support is determined by the presence of downclock mode (apart
5606 * from VBT setting).
5607 */
4f9db5b5 5608static struct drm_display_mode *
96178eeb
VK
5609intel_dp_drrs_init(struct intel_connector *intel_connector,
5610 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5611{
5612 struct drm_connector *connector = &intel_connector->base;
96178eeb 5613 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5614 struct drm_i915_private *dev_priv = dev->dev_private;
5615 struct drm_display_mode *downclock_mode = NULL;
5616
9da7d693
DV
5617 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5618 mutex_init(&dev_priv->drrs.mutex);
5619
4f9db5b5
PB
5620 if (INTEL_INFO(dev)->gen <= 6) {
5621 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5622 return NULL;
5623 }
5624
5625 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5626 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5627 return NULL;
5628 }
5629
5630 downclock_mode = intel_find_panel_downclock
5631 (dev, fixed_mode, connector);
5632
5633 if (!downclock_mode) {
a1d26342 5634 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5635 return NULL;
5636 }
5637
96178eeb 5638 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5639
96178eeb 5640 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5641 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5642 return downclock_mode;
5643}
5644
ed92f0b2 5645static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5646 struct intel_connector *intel_connector)
ed92f0b2
PZ
5647{
5648 struct drm_connector *connector = &intel_connector->base;
5649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5650 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5651 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5652 struct drm_i915_private *dev_priv = dev->dev_private;
5653 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5654 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5655 bool has_dpcd;
5656 struct drm_display_mode *scan;
5657 struct edid *edid;
6517d273 5658 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5659
5660 if (!is_edp(intel_dp))
5661 return true;
5662
49e6bc51
VS
5663 pps_lock(intel_dp);
5664 intel_edp_panel_vdd_sanitize(intel_dp);
5665 pps_unlock(intel_dp);
63635217 5666
ed92f0b2 5667 /* Cache DPCD and EDID for edp. */
ed92f0b2 5668 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5669
5670 if (has_dpcd) {
5671 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5672 dev_priv->no_aux_handshake =
5673 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5674 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5675 } else {
5676 /* if this fails, presume the device is a ghost */
5677 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5678 return false;
5679 }
5680
5681 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5682 pps_lock(intel_dp);
36b5f425 5683 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5684 pps_unlock(intel_dp);
ed92f0b2 5685
060c8778 5686 mutex_lock(&dev->mode_config.mutex);
0b99836f 5687 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5688 if (edid) {
5689 if (drm_add_edid_modes(connector, edid)) {
5690 drm_mode_connector_update_edid_property(connector,
5691 edid);
5692 drm_edid_to_eld(connector, edid);
5693 } else {
5694 kfree(edid);
5695 edid = ERR_PTR(-EINVAL);
5696 }
5697 } else {
5698 edid = ERR_PTR(-ENOENT);
5699 }
5700 intel_connector->edid = edid;
5701
5702 /* prefer fixed mode from EDID if available */
5703 list_for_each_entry(scan, &connector->probed_modes, head) {
5704 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5705 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5706 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5707 intel_connector, fixed_mode);
ed92f0b2
PZ
5708 break;
5709 }
5710 }
5711
5712 /* fallback to VBT if available for eDP */
5713 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5714 fixed_mode = drm_mode_duplicate(dev,
5715 dev_priv->vbt.lfp_lvds_vbt_mode);
5716 if (fixed_mode)
5717 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5718 }
060c8778 5719 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5720
01527b31
CT
5721 if (IS_VALLEYVIEW(dev)) {
5722 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5723 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5724
5725 /*
5726 * Figure out the current pipe for the initial backlight setup.
5727 * If the current pipe isn't valid, try the PPS pipe, and if that
5728 * fails just assume pipe A.
5729 */
5730 if (IS_CHERRYVIEW(dev))
5731 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5732 else
5733 pipe = PORT_TO_PIPE(intel_dp->DP);
5734
5735 if (pipe != PIPE_A && pipe != PIPE_B)
5736 pipe = intel_dp->pps_pipe;
5737
5738 if (pipe != PIPE_A && pipe != PIPE_B)
5739 pipe = PIPE_A;
5740
5741 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5742 pipe_name(pipe));
01527b31
CT
5743 }
5744
4f9db5b5 5745 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5746 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5747 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5748
5749 return true;
5750}
5751
16c25533 5752bool
f0fec3f2
PZ
5753intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5754 struct intel_connector *intel_connector)
a4fc5ed6 5755{
f0fec3f2
PZ
5756 struct drm_connector *connector = &intel_connector->base;
5757 struct intel_dp *intel_dp = &intel_dig_port->dp;
5758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5759 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5760 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5761 enum port port = intel_dig_port->port;
0b99836f 5762 int type;
a4fc5ed6 5763
a4a5d2f8
VS
5764 intel_dp->pps_pipe = INVALID_PIPE;
5765
ec5b01dd 5766 /* intel_dp vfuncs */
b6b5e383
DL
5767 if (INTEL_INFO(dev)->gen >= 9)
5768 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5769 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5770 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5771 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5772 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5773 else if (HAS_PCH_SPLIT(dev))
5774 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5775 else
5776 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5777
b9ca5fad
DL
5778 if (INTEL_INFO(dev)->gen >= 9)
5779 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5780 else
5781 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5782
ad64217b
ACO
5783 if (HAS_DDI(dev))
5784 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5785
0767935e
DV
5786 /* Preserve the current hw state. */
5787 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5788 intel_dp->attached_connector = intel_connector;
3d3dc149 5789
3b32a35b 5790 if (intel_dp_is_edp(dev, port))
b329530c 5791 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5792 else
5793 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5794
f7d24902
ID
5795 /*
5796 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5797 * for DP the encoder type can be set by the caller to
5798 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5799 */
5800 if (type == DRM_MODE_CONNECTOR_eDP)
5801 intel_encoder->type = INTEL_OUTPUT_EDP;
5802
c17ed5b5
VS
5803 /* eDP only on port B and/or C on vlv/chv */
5804 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5805 port != PORT_B && port != PORT_C))
5806 return false;
5807
e7281eab
ID
5808 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5809 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5810 port_name(port));
5811
b329530c 5812 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5813 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5814
a4fc5ed6
KP
5815 connector->interlace_allowed = true;
5816 connector->doublescan_allowed = 0;
5817
f0fec3f2 5818 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5819 edp_panel_vdd_work);
a4fc5ed6 5820
df0e9248 5821 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5822 drm_connector_register(connector);
a4fc5ed6 5823
affa9354 5824 if (HAS_DDI(dev))
bcbc889b
PZ
5825 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5826 else
5827 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5828 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5829
0b99836f 5830 /* Set up the hotplug pin. */
ab9d7c30
PZ
5831 switch (port) {
5832 case PORT_A:
1d843f9d 5833 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5834 break;
5835 case PORT_B:
1d843f9d 5836 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5837 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5838 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5839 break;
5840 case PORT_C:
1d843f9d 5841 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5842 break;
5843 case PORT_D:
1d843f9d 5844 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5845 break;
26951caf
XZ
5846 case PORT_E:
5847 intel_encoder->hpd_pin = HPD_PORT_E;
5848 break;
ab9d7c30 5849 default:
ad1c0b19 5850 BUG();
5eb08b69
ZW
5851 }
5852
dada1a9f 5853 if (is_edp(intel_dp)) {
773538e8 5854 pps_lock(intel_dp);
1e74a324
VS
5855 intel_dp_init_panel_power_timestamps(intel_dp);
5856 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5857 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5858 else
36b5f425 5859 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5860 pps_unlock(intel_dp);
dada1a9f 5861 }
0095e6dc 5862
9d1a1031 5863 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5864
0e32b39c 5865 /* init MST on ports that can support it */
0c9b3715
JN
5866 if (HAS_DP_MST(dev) &&
5867 (port == PORT_B || port == PORT_C || port == PORT_D))
5868 intel_dp_mst_encoder_init(intel_dig_port,
5869 intel_connector->base.base.id);
0e32b39c 5870
36b5f425 5871 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5872 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5873 if (is_edp(intel_dp)) {
5874 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5875 /*
5876 * vdd might still be enabled due to the delayed vdd off.
5877 * Make sure vdd is actually turned off here.
5878 */
773538e8 5879 pps_lock(intel_dp);
4be73780 5880 edp_panel_vdd_off_sync(intel_dp);
773538e8 5881 pps_unlock(intel_dp);
15b1d171 5882 }
34ea3d38 5883 drm_connector_unregister(connector);
b2f246a8 5884 drm_connector_cleanup(connector);
16c25533 5885 return false;
b2f246a8 5886 }
32f9d658 5887
f684960e
CW
5888 intel_dp_add_properties(intel_dp, connector);
5889
a4fc5ed6
KP
5890 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5891 * 0xd. Failure to do so will result in spurious interrupts being
5892 * generated on the port when a cable is not attached.
5893 */
5894 if (IS_G4X(dev) && !IS_GM45(dev)) {
5895 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5896 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5897 }
16c25533 5898
aa7471d2
JN
5899 i915_debugfs_connector_add(connector);
5900
16c25533 5901 return true;
a4fc5ed6 5902}
f0fec3f2
PZ
5903
5904void
5905intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5906{
13cf5504 5907 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5908 struct intel_digital_port *intel_dig_port;
5909 struct intel_encoder *intel_encoder;
5910 struct drm_encoder *encoder;
5911 struct intel_connector *intel_connector;
5912
b14c5679 5913 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5914 if (!intel_dig_port)
5915 return;
5916
08d9bc92 5917 intel_connector = intel_connector_alloc();
11aee0f6
SM
5918 if (!intel_connector)
5919 goto err_connector_alloc;
f0fec3f2
PZ
5920
5921 intel_encoder = &intel_dig_port->base;
5922 encoder = &intel_encoder->base;
5923
5924 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5925 DRM_MODE_ENCODER_TMDS);
5926
5bfe2ac0 5927 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5928 intel_encoder->disable = intel_disable_dp;
00c09d70 5929 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5930 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5931 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5932 if (IS_CHERRYVIEW(dev)) {
9197c88b 5933 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5934 intel_encoder->pre_enable = chv_pre_enable_dp;
5935 intel_encoder->enable = vlv_enable_dp;
580d3811 5936 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 5937 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 5938 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5939 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5940 intel_encoder->pre_enable = vlv_pre_enable_dp;
5941 intel_encoder->enable = vlv_enable_dp;
49277c31 5942 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5943 } else {
ecff4f3b
JN
5944 intel_encoder->pre_enable = g4x_pre_enable_dp;
5945 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5946 if (INTEL_INFO(dev)->gen >= 5)
5947 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5948 }
f0fec3f2 5949
174edf1f 5950 intel_dig_port->port = port;
f0fec3f2
PZ
5951 intel_dig_port->dp.output_reg = output_reg;
5952
00c09d70 5953 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5954 if (IS_CHERRYVIEW(dev)) {
5955 if (port == PORT_D)
5956 intel_encoder->crtc_mask = 1 << 2;
5957 else
5958 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5959 } else {
5960 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5961 }
bc079e8b 5962 intel_encoder->cloneable = 0;
f0fec3f2 5963
13cf5504 5964 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5965 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5966
11aee0f6
SM
5967 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5968 goto err_init_connector;
5969
5970 return;
5971
5972err_init_connector:
5973 drm_encoder_cleanup(encoder);
5974 kfree(intel_connector);
5975err_connector_alloc:
5976 kfree(intel_dig_port);
5977
5978 return;
f0fec3f2 5979}
0e32b39c
DA
5980
5981void intel_dp_mst_suspend(struct drm_device *dev)
5982{
5983 struct drm_i915_private *dev_priv = dev->dev_private;
5984 int i;
5985
5986 /* disable MST */
5987 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5988 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
5989 if (!intel_dig_port)
5990 continue;
5991
5992 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5993 if (!intel_dig_port->dp.can_mst)
5994 continue;
5995 if (intel_dig_port->dp.is_mst)
5996 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5997 }
5998 }
5999}
6000
6001void intel_dp_mst_resume(struct drm_device *dev)
6002{
6003 struct drm_i915_private *dev_priv = dev->dev_private;
6004 int i;
6005
6006 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6007 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6008 if (!intel_dig_port)
6009 continue;
6010 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6011 int ret;
6012
6013 if (!intel_dig_port->dp.can_mst)
6014 continue;
6015
6016 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6017 if (ret != 0) {
6018 intel_dp_check_mst_status(&intel_dig_port->dp);
6019 }
6020 }
6021 }
6022}