drm/i915: Do not wait atomically for display clocks
[linux-block.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf 50struct dp_link_dpll {
840b32b7 51 int clock;
9dd4ffdf
CML
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
840b32b7 56 { 162000,
9dd4ffdf 57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 58 { 270000,
9dd4ffdf
CML
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
840b32b7 63 { 162000,
9dd4ffdf 64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 65 { 270000,
9dd4ffdf
CML
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5 69static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 70 { 162000,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 72 { 270000,
65ce4bf5
CML
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
85 */
840b32b7 86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 88 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
840b32b7 90 { 540000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8
CML
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63 93
64987fc5
SJ
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15
VS
97 324000, 432000, 540000 };
98static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
/*
 * Return a 4-bit mask of the lanes (out of lanes 0-3) NOT used by a
 * link of the given width.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1 << lane_count) - 1;

	return 0xf & ~used;
}
137
ed4e9c1d
VS
138static int
139intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 140{
7183dc29 141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
1db10e28 146 case DP_LINK_BW_5_4:
d4eead50 147 break;
a4fc5ed6 148 default:
d4eead50
ID
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
a4fc5ed6
KP
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155}
156
eeb6324d
PZ
157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158{
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
eeb6324d
PZ
160 u8 source_max, sink_max;
161
ccb1a831 162 source_max = intel_dig_port->max_lanes;
eeb6324d
PZ
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166}
167
cd9dde44
AJ
168/*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
/*
 * Bandwidth required by a mode, in decakilobits/s (see the unit
 * discussion above): kilopixels/s times bits/pixel, divided by 10
 * with rounding up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
190
/*
 * Max payload data rate of a link in decakilobits/s: raw symbol rate
 * times lane count, scaled by the 8b/10b coding efficiency.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
196
c19de8eb 197static enum drm_mode_status
a4fc5ed6
KP
198intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
200{
df0e9248 201 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
799487f5 206 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
a4fc5ed6 207
dd06f90e
JN
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
210 return MODE_PANEL;
211
dd06f90e 212 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 213 return MODE_PANEL;
03afc4a2
DV
214
215 target_clock = fixed_mode->clock;
7de56f43
ZY
216 }
217
50fec21a 218 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 219 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
799487f5 224 if (mode_rate > max_rate || target_clock > max_dotclk)
c4867936 225 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
0af78a2b
DV
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
a4fc5ed6
KP
233 return MODE_OK;
234}
235
/* Pack up to 4 bytes, MSB first, into one AUX channel data word. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i, n = src_bytes > 4 ? 4 : src_bytes;
	uint32_t v = 0;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (8 * (3 - i));

	return v;
}
247
/* Unpack one AUX channel data word, MSB first, into up to 4 bytes. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
256
bf13e81b
JN
257static void
258intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 259 struct intel_dp *intel_dp);
bf13e81b
JN
260static void
261intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 262 struct intel_dp *intel_dp);
bf13e81b 263
773538e8
VS
264static void pps_lock(struct intel_dp *intel_dp)
265{
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
271
272 /*
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
275 */
25f78f58 276 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
277 intel_display_power_get(dev_priv, power_domain);
278
279 mutex_lock(&dev_priv->pps_mutex);
280}
281
282static void pps_unlock(struct intel_dp *intel_dp)
283{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
289
290 mutex_unlock(&dev_priv->pps_mutex);
291
25f78f58 292 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
293 intel_display_power_put(dev_priv, power_domain);
294}
295
/*
 * Make the power sequencer assigned to intel_dp->pps_pipe lock onto this
 * port by briefly enabling and disabling the port with a minimal 1-lane,
 * lowest-drive configuration. The pipe's DPLL must be running for the
 * trick to work, so it is force-enabled for the duration if necessary
 * (on CHV the common lane powergate may also need releasing first).
 * Must only be called while the port is disabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	/* Toggling an already-enabled port here would glitch the display. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Route the port to the pipe whose PPS we are kicking. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power seqeuencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL (and CHV powergate) force-ons. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
369
/*
 * Return the pipe whose panel power sequencer drives this eDP port,
 * assigning one on first use: pick a pipe no other eDP port has claimed,
 * steal it from any non-eDP user, program the PPS for this port, and
 * kick the sequencer so it locks onto the port. Caller must hold
 * pps_mutex. Only valid for eDP (VLV/CHV).
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already assigned? Keep the existing pipe. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
432
6491ab27
VS
433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
434 enum pipe pipe);
435
436static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
437 enum pipe pipe)
438{
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
440}
441
442static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
443 enum pipe pipe)
444{
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
446}
447
448static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return true;
452}
bf13e81b 453
a4a5d2f8 454static enum pipe
6491ab27
VS
455vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
456 enum port port,
457 vlv_pipe_check pipe_check)
a4a5d2f8
VS
458{
459 enum pipe pipe;
bf13e81b 460
bf13e81b
JN
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
464
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
466 continue;
467
6491ab27
VS
468 if (!pipe_check(dev_priv, pipe))
469 continue;
470
a4a5d2f8 471 return pipe;
bf13e81b
JN
472 }
473
a4a5d2f8
VS
474 return INVALID_PIPE;
475}
476
477static void
478vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
479{
480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
481 struct drm_device *dev = intel_dig_port->base.base.dev;
482 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
483 enum port port = intel_dig_port->port;
484
485 lockdep_assert_held(&dev_priv->pps_mutex);
486
487 /* try to find a pipe with this port selected */
6491ab27
VS
488 /* first pick one where the panel is on */
489 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
490 vlv_pipe_has_pp_on);
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp->pps_pipe == INVALID_PIPE)
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 vlv_pipe_has_vdd_on);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
498 vlv_pipe_any);
a4a5d2f8
VS
499
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp->pps_pipe == INVALID_PIPE) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
503 port_name(port));
504 return;
bf13e81b
JN
505 }
506
a4a5d2f8
VS
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port), pipe_name(intel_dp->pps_pipe));
509
36b5f425
VS
510 intel_dp_init_panel_power_sequencer(dev, intel_dp);
511 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
512}
513
773538e8
VS
514void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
515{
516 struct drm_device *dev = dev_priv->dev;
517 struct intel_encoder *encoder;
518
666a4537 519 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
773538e8
VS
520 return;
521
522 /*
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so one
529 * should use them always.
530 */
531
19c8054c 532 for_each_intel_encoder(dev, encoder) {
773538e8
VS
533 struct intel_dp *intel_dp;
534
535 if (encoder->type != INTEL_OUTPUT_EDP)
536 continue;
537
538 intel_dp = enc_to_intel_dp(&encoder->base);
539 intel_dp->pps_pipe = INVALID_PIPE;
540 }
bf13e81b
JN
541}
542
f0f59a00
VS
543static i915_reg_t
544_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b
JN
545{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
b0a08bec
VK
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554}
555
f0f59a00
VS
556static i915_reg_t
557_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b
JN
558{
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
560
b0a08bec
VK
561 if (IS_BROXTON(dev))
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
564 return PCH_PP_STATUS;
565 else
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
567}
568
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only eDP cares, and only on an actual restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Block the reboot until the panel has fully cycled off. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
604
4be73780 605static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 606{
30add22d 607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
608 struct drm_i915_private *dev_priv = dev->dev_private;
609
e39b999a
VS
610 lockdep_assert_held(&dev_priv->pps_mutex);
611
666a4537 612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
613 intel_dp->pps_pipe == INVALID_PIPE)
614 return false;
615
bf13e81b 616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
617}
618
4be73780 619static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 620{
30add22d 621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
622 struct drm_i915_private *dev_priv = dev->dev_private;
623
e39b999a
VS
624 lockdep_assert_held(&dev_priv->pps_mutex);
625
666a4537 626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
627 intel_dp->pps_pipe == INVALID_PIPE)
628 return false;
629
773538e8 630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
631}
632
9b984dae
KP
633static void
634intel_dp_check_edp(struct intel_dp *intel_dp)
635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 637 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 638
9b984dae
KP
639 if (!is_edp(intel_dp))
640 return;
453c5420 641
4be73780 642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
647 }
648}
649
/*
 * Wait for the current AUX transaction to finish (SEND_BUSY to clear),
 * either interrupt-driven with a 10ms timeout or by atomic polling.
 * Returns the final AUX_CH_CTL status; logs an error if the hardware
 * never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* NOTE: C re-reads the register on every evaluation, updating 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
673
ec5b01dd 674static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 675{
174edf1f
PZ
676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
677 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 678
ec5b01dd
DL
679 /*
680 * The clock divider is based off the hrawclk, and would like to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 682 */
fce18c4c 683 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
ec5b01dd
DL
684}
685
686static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687{
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 690 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
691
692 if (index)
693 return 0;
694
695 if (intel_dig_port->port == PORT_A) {
fce18c4c 696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
05024da3 697
ec5b01dd 698 } else {
fce18c4c 699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
ec5b01dd
DL
700 }
701}
702
703static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704{
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private;
708
709 if (intel_dig_port->port == PORT_A) {
710 if (index)
711 return 0;
05024da3 712 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
56f5f700 713 } else if (HAS_PCH_LPT_H(dev_priv)) {
2c55c336 714 /* Workaround for non-ULT HSW */
bc86625a
CW
715 switch (index) {
716 case 0: return 63;
717 case 1: return 72;
718 default: return 0;
719 }
ec5b01dd 720 } else {
fce18c4c 721 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
2c55c336 722 }
b84a1cf8
RV
723}
724
ec5b01dd
DL
725static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
726{
727 return index ? 0 : 100;
728}
729
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code, so
 * return a dummy single-entry table.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index == 0 ? 1 : 0;
}
739
5ed12a19
DL
740static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
741 bool has_aux_irq,
742 int send_bytes,
743 uint32_t aux_clock_divider)
744{
745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 struct drm_device *dev = intel_dig_port->base.base.dev;
747 uint32_t precharge, timeout;
748
749 if (IS_GEN6(dev))
750 precharge = 3;
751 else
752 precharge = 5;
753
f3c6a3a7 754 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
5ed12a19
DL
755 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 else
757 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758
759 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 760 DP_AUX_CH_CTL_DONE |
5ed12a19 761 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 762 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 763 timeout |
788d4433 764 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
765 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 767 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
768}
769
b9ca5fad
DL
770static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
771 bool has_aux_irq,
772 int send_bytes,
773 uint32_t unused)
774{
775 return DP_AUX_CH_CTL_SEND_BUSY |
776 DP_AUX_CH_CTL_DONE |
777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 DP_AUX_CH_CTL_TIME_OUT_1600us |
780 DP_AUX_CH_CTL_RECEIVE_ERROR |
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
783}
784
/*
 * Perform one raw AUX channel transaction: write up to 20 bytes from
 * @send, trigger the transfer, and read up to @recv_size reply bytes
 * into @recv. Handles VDD forcing for eDP, PM QoS to keep AUX latency
 * low, retries across all available clock dividers (up to 5 tries each
 * per DP spec), and error/timeout classification.
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY (channel stuck busy or bogus reply size), -E2BIG (message too
 * large), -EIO (receive error), -ETIMEDOUT (sink did not respond).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN to one per distinct stuck status. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: walk the platform's clock-divider table. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	/* Only drop VDD if we turned it on ourselves above. */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
956
/* An AUX header is 3 address bytes (bare address) plus one command byte. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/*
 * drm_dp_aux .transfer hook: marshal a drm_dp_aux_msg into the raw
 * byte format intel_dp_aux_ch() expects, run the transaction, and
 * unpack the reply.
 *
 * Returns the payload size on success (for writes: the number of bytes
 * the sink accepted, which may be short), or a negative error code.
 * The caller must check msg->reply before trusting read data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request, 20-bit address, length-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes are address-only probes. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply is one status byte followed by the payload. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1031
/*
 * AUX control register lookup for g4x-class hardware: only ports B-D
 * have AUX channels. Unknown ports are flagged and fall back to port B
 * so the caller never dereferences an invalid register.
 */
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}
1045
/*
 * AUX data register lookup for g4x-class hardware; @index selects one
 * of the per-channel data dwords. Same port coverage and fallback as
 * g4x_aux_ctl_reg().
 */
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}
1059
/*
 * AUX control register lookup for PCH-split (Ironlake+) hardware:
 * port A (eDP) lives on the CPU, ports B-D are behind the PCH.
 * Unknown ports fall back to port A after a MISSING_CASE warning.
 */
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}
1075
/*
 * AUX data register lookup for PCH-split hardware; mirrors
 * ilk_aux_ctl_reg(): CPU registers for port A, PCH registers for B-D.
 */
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}
1091
/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 *
 * Maps the VBT DP_AUX_* code to the port whose AUX channel port E
 * borrows; falls back to port A (with a warning) on bogus VBT data.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}
1115
/*
 * AUX control register lookup for SKL+: port E has no AUX channel of
 * its own, so it is first remapped to its VBT-designated alternate.
 */
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}
1133
/*
 * AUX data register lookup for SKL+; same port E remapping as
 * skl_aux_ctl_reg().
 */
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}
1151
f0f59a00
VS
1152static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153 enum port port)
330e20ec
VS
1154{
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1159 else
1160 return g4x_aux_ctl_reg(dev_priv, port);
1161}
1162
f0f59a00
VS
1163static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
330e20ec
VS
1165{
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1170 else
1171 return g4x_aux_data_reg(dev_priv, port, index);
1172}
1173
/*
 * Cache the AUX control and data register offsets for this port so the
 * hot AUX transfer path doesn't redo the platform dispatch every time.
 */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1184
/*
 * Tear down the AUX channel registered by intel_dp_aux_init():
 * unregister from the drm_dp_aux core, then free the name string
 * allocated with kasprintf().
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1191
/*
 * Register this encoder's DP AUX channel with the drm_dp_aux core and
 * link its i2c adapter into the connector's sysfs directory.
 *
 * Returns 0 on success or a negative error code; on any failure all
 * partially-created state (name allocation, aux registration) is
 * unwound before returning.
 */
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	/* Resolve and cache the AUX register offsets for this port. */
	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* Only the name exists at this point; free it directly. */
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* Full unwind: unregister aux and free the name. */
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}
1233
/*
 * Connector unregister hook: remove the sysfs link created by
 * intel_dp_aux_init() (MST connectors never had one), then fall
 * through to the common connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1244
/*
 * Program the shared DPLL state for SKL eDP: always use DPLL0 with a
 * per-link-rate DPLL_CTRL1 link rate override. port_clock is in kHz of
 * symbol clock, so /2 yields the table of standard DP link rates.
 *
 * NOTE(review): the switch intentionally has no default — an
 * unrecognized rate leaves ctrl1 with only the OVERRIDE bit set;
 * callers only pass rates from the common-rates table.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1290
/*
 * Select the fixed LCPLL tap for HSW/BDW DP: the DDI clock is chosen
 * from the three LCPLL-derived link rates. port_clock is the symbol
 * clock in kHz, hence the /2 to reach the standard link rates.
 *
 * NOTE(review): no default case — unsupported rates leave ddi_pll_sel
 * zeroed by the memset; callers only pass rates from default_rates.
 */
void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}
1309
fc0f8e25 1310static int
12f6a2e2 1311intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1312{
94ca719e
VS
1313 if (intel_dp->num_sink_rates) {
1314 *sink_rates = intel_dp->sink_rates;
1315 return intel_dp->num_sink_rates;
fc0f8e25 1316 }
12f6a2e2
VS
1317
1318 *sink_rates = default_rates;
1319
1320 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1321}
1322
e588fa18 1323bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1324{
e588fa18
ACO
1325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1326 struct drm_device *dev = dig_port->base.base.dev;
1327
ed63baaf 1328 /* WaDisableHBR2:skl */
e87a005d 1329 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1330 return false;
1331
1332 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1333 (INTEL_INFO(dev)->gen >= 9))
1334 return true;
1335 else
1336 return false;
1337}
1338
/*
 * Report the link rates this source device supports, picking the
 * platform rate table (BXT, SKL/KBL, or the common defaults) and
 * trimming the top (HBR2) entry when the source can't drive it.
 *
 * Returns the number of valid entries in *source_rates.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
1363
c6bb3538
DV
1364static void
1365intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1366 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1367{
1368 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1369 const struct dp_link_dpll *divisor = NULL;
1370 int i, count = 0;
c6bb3538
DV
1371
1372 if (IS_G4X(dev)) {
9dd4ffdf
CML
1373 divisor = gen4_dpll;
1374 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1375 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1376 divisor = pch_dpll;
1377 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1378 } else if (IS_CHERRYVIEW(dev)) {
1379 divisor = chv_dpll;
1380 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1381 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1382 divisor = vlv_dpll;
1383 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1384 }
9dd4ffdf
CML
1385
1386 if (divisor && count) {
1387 for (i = 0; i < count; i++) {
840b32b7 1388 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1389 pipe_config->dpll = divisor[i].dpll;
1390 pipe_config->clock_set = true;
1391 break;
1392 }
1393 }
c6bb3538
DV
1394 }
1395}
1396
2ecae76a
VS
1397static int intersect_rates(const int *source_rates, int source_len,
1398 const int *sink_rates, int sink_len,
94ca719e 1399 int *common_rates)
a8f3ef61
SJ
1400{
1401 int i = 0, j = 0, k = 0;
1402
a8f3ef61
SJ
1403 while (i < source_len && j < sink_len) {
1404 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1405 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1406 return k;
94ca719e 1407 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1408 ++k;
1409 ++i;
1410 ++j;
1411 } else if (source_rates[i] < sink_rates[j]) {
1412 ++i;
1413 } else {
1414 ++j;
1415 }
1416 }
1417 return k;
1418}
1419
/*
 * Fill common_rates with the link rates both this source and the
 * attached sink support, sorted ascending. Returns the count.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1433
/*
 * Render an int array as a comma-separated string into buf. Output is
 * always NUL-terminated; formatting stops silently once the buffer is
 * full (snprintf truncation).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int idx;

	/* An empty array prints as the empty string. */
	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		const char *sep = idx ? ", " : "";
		int written = snprintf(str, len, "%s%d", sep, array[idx]);

		/* Stop as soon as the output would be truncated. */
		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1449
/*
 * Dump the source, sink and common link-rate tables to the KMS debug
 * log. Early-outs when KMS debugging is disabled so the string
 * formatting work is skipped entirely.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1472
f4896f15 1473static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1474{
1475 int i = 0;
1476
1477 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1478 if (find == rates[i])
1479 break;
1480
1481 return i;
1482}
1483
/*
 * Return the highest link rate both source and sink support, or the
 * RBR floor (162000 kHz) if, unexpectedly, there is no common rate.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/*
	 * rates[] is zero-initialized and filled ascending, so searching
	 * for 0 finds the first unused slot, i.e. the entry count; the
	 * element before it is the highest common rate.
	 */
	return rates[rate_to_index(0, rates) - 1];
}
1496
/*
 * Translate a link rate into the sink's DP_LINK_RATE_SET index
 * (eDP 1.4 rate-select method; only valid when the sink advertised
 * an explicit rate table).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1501
94223d04
ACO
1502void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1503 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1504{
1505 if (intel_dp->num_sink_rates) {
1506 *link_bw = 0;
1507 *rate_select =
1508 intel_dp_rate_select(intel_dp, port_clock);
1509 } else {
1510 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1511 *rate_select = 0;
1512 }
1513}
1514
00c09d70 1515bool
5bfe2ac0 1516intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1517 struct intel_crtc_state *pipe_config)
a4fc5ed6 1518{
5bfe2ac0 1519 struct drm_device *dev = encoder->base.dev;
36008365 1520 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1521 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1523 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1524 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1525 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1526 int lane_count, clock;
56071a20 1527 int min_lane_count = 1;
eeb6324d 1528 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1529 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1530 int min_clock = 0;
a8f3ef61 1531 int max_clock;
083f9560 1532 int bpp, mode_rate;
ff9a6750 1533 int link_avail, link_clock;
94ca719e
VS
1534 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1535 int common_len;
04a60f9f 1536 uint8_t link_bw, rate_select;
a8f3ef61 1537
94ca719e 1538 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1539
1540 /* No common link rates between source and sink */
94ca719e 1541 WARN_ON(common_len <= 0);
a8f3ef61 1542
94ca719e 1543 max_clock = common_len - 1;
a4fc5ed6 1544
bc7d38a4 1545 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1546 pipe_config->has_pch_encoder = true;
1547
03afc4a2 1548 pipe_config->has_dp_encoder = true;
f769cd24 1549 pipe_config->has_drrs = false;
9fcb1704 1550 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1551
dd06f90e
JN
1552 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1553 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1554 adjusted_mode);
a1b2278e
CK
1555
1556 if (INTEL_INFO(dev)->gen >= 9) {
1557 int ret;
e435d6e5 1558 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1559 if (ret)
1560 return ret;
1561 }
1562
b5667627 1563 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1564 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1565 intel_connector->panel.fitting_mode);
1566 else
b074cec8
JB
1567 intel_pch_panel_fitting(intel_crtc, pipe_config,
1568 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1569 }
1570
cb1793ce 1571 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1572 return false;
1573
083f9560 1574 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1575 "max bw %d pixel clock %iKHz\n",
94ca719e 1576 max_lane_count, common_rates[max_clock],
241bfc38 1577 adjusted_mode->crtc_clock);
083f9560 1578
36008365
DV
1579 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1580 * bpc in between. */
3e7ca985 1581 bpp = pipe_config->pipe_bpp;
56071a20 1582 if (is_edp(intel_dp)) {
22ce5628
TS
1583
1584 /* Get bpp from vbt only for panels that dont have bpp in edid */
1585 if (intel_connector->base.display_info.bpc == 0 &&
1586 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1587 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1588 dev_priv->vbt.edp_bpp);
1589 bpp = dev_priv->vbt.edp_bpp;
1590 }
1591
344c5bbc
JN
1592 /*
1593 * Use the maximum clock and number of lanes the eDP panel
1594 * advertizes being capable of. The panels are generally
1595 * designed to support only a single clock and lane
1596 * configuration, and typically these values correspond to the
1597 * native resolution of the panel.
1598 */
1599 min_lane_count = max_lane_count;
1600 min_clock = max_clock;
7984211e 1601 }
657445fe 1602
36008365 1603 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1604 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1605 bpp);
36008365 1606
c6930992 1607 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1608 for (lane_count = min_lane_count;
1609 lane_count <= max_lane_count;
1610 lane_count <<= 1) {
1611
94ca719e 1612 link_clock = common_rates[clock];
36008365
DV
1613 link_avail = intel_dp_max_data_rate(link_clock,
1614 lane_count);
1615
1616 if (mode_rate <= link_avail) {
1617 goto found;
1618 }
1619 }
1620 }
1621 }
c4867936 1622
36008365 1623 return false;
3685a8f3 1624
36008365 1625found:
55bc60db
VS
1626 if (intel_dp->color_range_auto) {
1627 /*
1628 * See:
1629 * CEA-861-E - 5.1 Default Encoding Parameters
1630 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1631 */
0f2a2a75
VS
1632 pipe_config->limited_color_range =
1633 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1634 } else {
1635 pipe_config->limited_color_range =
1636 intel_dp->limited_color_range;
55bc60db
VS
1637 }
1638
90a6b7b0 1639 pipe_config->lane_count = lane_count;
a8f3ef61 1640
657445fe 1641 pipe_config->pipe_bpp = bpp;
94ca719e 1642 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1643
04a60f9f
VS
1644 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1645 &link_bw, &rate_select);
1646
1647 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1648 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1649 pipe_config->port_clock, bpp);
36008365
DV
1650 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1651 mode_rate, link_avail);
a4fc5ed6 1652
03afc4a2 1653 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1654 adjusted_mode->crtc_clock,
1655 pipe_config->port_clock,
03afc4a2 1656 &pipe_config->dp_m_n);
9d1a455b 1657
439d7ac0 1658 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1659 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1660 pipe_config->has_drrs = true;
439d7ac0
PB
1661 intel_link_compute_m_n(bpp, lane_count,
1662 intel_connector->panel.downclock_mode->clock,
1663 pipe_config->port_clock,
1664 &pipe_config->dp_m2_n2);
1665 }
1666
ef11bdb3 1667 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1668 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1669 else if (IS_BROXTON(dev))
1670 /* handled in ddi */;
5416d871 1671 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1672 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1673 else
840b32b7 1674 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1675
03afc4a2 1676 return true;
a4fc5ed6
KP
1677}
1678
/*
 * Latch the negotiated link rate and lane count from the pipe config
 * into the intel_dp struct for use by link training and retraining.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1685
/*
 * Prepare the DP port register value (intel_dp->DP) for enabling:
 * voltage swing/pre-emphasis defaults, lane count, sync polarity,
 * enhanced framing and pipe selection, split by register layout
 * generation. The value is written to hardware later, at enable time.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select in the DP reg. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH: enhanced framing lives in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU (g4x, VLV/CHV, SNB) layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1767
ffd6749d
PZ
1768#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1769#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1770
1a5ef5b7
PZ
1771#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1772#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1773
ffd6749d
PZ
1774#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1775#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1776
/*
 * Poll the eDP panel power status register until (status & mask) ==
 * value, with a 5 second timeout (polling every 10 ms). Logs an error
 * on timeout but does not propagate it. Must be called with pps_mutex
 * held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	/* Sleeping wait: this path must never be entered atomically. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 1803
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1809
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1815
/*
 * Enforce the panel's power cycle delay (eDP t11_t12): if not enough
 * time has passed since the panel was last powered off, sleep for the
 * remainder, then wait for the sequencer to report the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
			intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1836
/* Honor the panel's power-on to backlight-on delay (eDP t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1842
/* Honor the panel's backlight-off settle delay (eDP t9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 1848
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* BXT has no register-unlock key; only pre-BXT needs the swap. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1868
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on via the EDP_FORCE_VDD override bit and takes an AUX
 * power domain reference. Returns true if this call actually turned VDD on
 * (i.e. the caller owes a matching disable), false if VDD was already
 * requested or this is not an eDP port.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred VDD-off so it doesn't race us. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already on in hardware: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1926
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	/* Take the PPS lock only around the low-level helper. */
	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	/* This entry point expects to be the sole VDD owner; warn otherwise. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}
1948
/*
 * Immediately drop the VDD force-on override and release the AUX power
 * domain reference taken in edp_panel_vdd_on(). Caller holds pps_mutex
 * and must no longer want VDD (see WARN_ON below).
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Dropping VDD with panel power off starts the power-cycle window. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
5d613501 1989
/*
 * Delayed-work handler scheduled by edp_panel_vdd_schedule_off(): turns
 * VDD off unless someone re-requested it in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	/* Only power down if no one re-armed want_panel_vdd since scheduling. */
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2000
/* Arm the deferred VDD-off work instead of dropping VDD immediately. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2013
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: true = drop VDD right now; false = defer via delayed work so a
 *        burst of AUX operations keeps VDD up.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2039
/*
 * Turn eDP panel power on through the PPS control register and wait for
 * the power-on sequence to complete. Caller holds pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the minimum off time before powering back on. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used by wait_backlight_on() to honour the on-delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 2087
/* Public wrapper: take the PPS lock around edp_panel_on(). */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2097
/*
 * Turn eDP panel power off and wait for the power-off sequence to finish.
 * Expects VDD to be forced on (see WARN below) and releases the AUX power
 * domain reference taken when VDD was enabled. Caller holds pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record off time for the subsequent power-cycle delay. */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
e39b999a 2140
/* Public wrapper: take the PPS lock around edp_panel_off(). */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2150
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2180
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the PPS backlight-enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2192
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record off time, then honour the mandated backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2219
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PPS bit first, then the PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
a4fc5ed6 2231
73580fb7
JN
2232/*
2233 * Hook for controlling the panel power control backlight through the bl_power
2234 * sysfs attribute. Take care to handle multiple calls.
2235 */
2236static void intel_edp_backlight_power(struct intel_connector *connector,
2237 bool enable)
2238{
2239 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2240 bool is_enabled;
2241
773538e8 2242 pps_lock(intel_dp);
e39b999a 2243 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2244 pps_unlock(intel_dp);
73580fb7
JN
2245
2246 if (is_enabled == enable)
2247 return;
2248
23ba9373
JN
2249 DRM_DEBUG_KMS("panel power control backlight %s\n",
2250 enable ? "enable" : "disable");
73580fb7
JN
2251
2252 if (enable)
2253 _intel_edp_backlight_on(intel_dp);
2254 else
2255 _intel_edp_backlight_off(intel_dp);
2256}
2257
/* Assert that the DP port enable bit matches the expected @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2270
/* Assert that the eDP PLL enable bit in DP_A matches the expected @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2281
/*
 * Enable the eDP PLL in DP_A: first program the frequency select with the
 * PLL still off, then enable it. Pipe, port and PLL must all be off on
 * entry (asserted below). The udelay()s give the hardware settle time.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2312
/*
 * Disable the eDP PLL in DP_A. Pipe and port must already be off and the
 * PLL on (asserted below); udelay(200) lets the PLL spin down.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2331
c7ad3810 2332/* If the sink supports it, try to set the power state appropriately */
c19b0669 2333void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2334{
2335 int ret, i;
2336
2337 /* Should have a valid DPCD by this point */
2338 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2339 return;
2340
2341 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2342 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2343 DP_SET_POWER_D3);
c7ad3810
JB
2344 } else {
2345 /*
2346 * When turning on, we need to retry for 1ms to give the sink
2347 * time to wake up.
2348 */
2349 for (i = 0; i < 3; i++) {
9d1a1031
JN
2350 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2351 DP_SET_POWER_D0);
c7ad3810
JB
2352 if (ret == 1)
2353 break;
2354 msleep(1);
2355 }
2356 }
f9cac721
JN
2357
2358 if (ret != 1)
2359 DRM_DEBUG_KMS("failed to %s sink power state\n",
2360 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2361}
2362
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe drives it. Returns false if the power domain is off, the
 * port is disabled, or (CPT) no transcoder maps to this port.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	/* The pipe encoding of the port register varies by platform. */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the mapping lives in the transcoder registers. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
d240f20f 2415
/*
 * Read the current hardware state of this DP encoder back into
 * @pipe_config: sync polarity flags, audio enable, color range, lane
 * count, M/N values, port clock and derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT PCH ports the sync polarity lives in the transcoder reg,
	 * elsewhere it's in the port register itself. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2498
/*
 * Encoder disable hook: tear down audio/PSR, put the sink to sleep and
 * power the panel down (with VDD held across the switch-off).
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2522
/* ILK+ post-disable: drop the link, then the eDP PLL for port A. */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2534
/* VLV post-disable: just take the link down (no eDP PLL to manage). */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2541
/*
 * Assert (@reset == true) or deassert the CHV PHY data-lane and clock
 * soft resets via DPIO, touching the second PCS channel only when more
 * than two lanes are in use. Caller holds sb_lock for the DPIO access.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane resets for lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Lane resets for lanes 2/3, only when in use. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Clock soft reset for lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	/* Clock soft reset for lanes 2/3, only when in use. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
97fd4d5c 2585
/* CHV post-disable: take the link down, then hold the PHY lanes in reset. */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2601
/*
 * Encode the requested link-training pattern (and scrambling disable) into
 * either DP_TP_CTL (DDI platforms) or the port register value pointed to
 * by @DP (pre-DDI platforms, with CPT/CHV-specific encodings). For DDI the
 * register is written here; otherwise the caller writes *DP.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style encoding in the port register. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV can do pattern 3 on this path. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2685
/*
 * Program the DP port register and then enable the port with training
 * pattern 1 selected, using the two-write sequence VLV/CHV require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2713
/*
 * Encoder enable hook: bring up the port, panel power and link, working
 * around spurious FIFO underruns on port A, then train the link and
 * enable audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* Power the panel on with VDD held across the sequence. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
89b667f8 2783
ecff4f3b
JN
2784static void g4x_enable_dp(struct intel_encoder *encoder)
2785{
828f5c6e
JN
2786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2787
ecff4f3b 2788 intel_enable_dp(encoder);
4be73780 2789 intel_edp_backlight_on(intel_dp);
ab1f90f9 2790}
89b667f8 2791
ab1f90f9
JN
2792static void vlv_enable_dp(struct intel_encoder *encoder)
2793{
828f5c6e
JN
2794 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2795
4be73780 2796 intel_edp_backlight_on(intel_dp);
b32c6f48 2797 intel_psr_enable(intel_dp);
d240f20f
JB
2798}
2799
/*
 * g4x/ilk pre_enable hook: program the port register and, for CPU eDP
 * (port A), turn on the eDP PLL before the port is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
2826
/*
 * Logically disconnect the power sequencer currently assigned to this
 * port: sync off vdd, clear the port-select in the PP_ON register and
 * mark the pps_pipe as unassigned. Caller holds pps_mutex (via callers).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* vdd must be off before the sequencer can be handed over. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2852
/*
 * Take the given pipe's power sequencer away from whichever eDP port is
 * currently using it, so the caller can assign it to a new port.
 * Must be called with pps_mutex held.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have sequencers on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP ports own power sequencers. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from a port that is still driving a crtc is a bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2888
/*
 * Bind this eDP port to its crtc's per-pipe power sequencer: detach any
 * sequencer the port was using before, steal the target pipe's sequencer
 * from another port if necessary, then (re)initialize the PPS registers.
 * Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only eDP uses a panel power sequencer. */
	if (!is_edp(intel_dp))
		return;

	/* Already bound to the right pipe; nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2929
/*
 * VLV pre_enable hook: program PCS DPIO registers for the lane/channel,
 * then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	/* DPIO (sideband) access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the read result is immediately discarded by the
	 * "val = 0" below, so only the (possible) side effect of the DPIO
	 * read is kept — confirm whether the read is actually required.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	/* Bit 21 selects which pipe drives this PCS channel. */
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	/* Magic PCS values carried over from VBIOS/reference code. */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2958
/*
 * VLV pre_pll_enable hook: prepare the port register and put the Tx
 * lanes into their default reset state via DPIO before the PLL starts.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
			 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2988
/*
 * CHV pre_enable hook: program per-lane DPIO settings (FIFO reset
 * source, upar bits, data lane stagger), release the data lane reset,
 * then run the common DP enable sequence. Register order matters.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* PCS23 only carries lanes 2/3; skip it for 1-2 lane configs. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* Stagger value scales down with port clock (thresholds in kHz). */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3077
/*
 * CHV pre_pll_enable hook: power up the PHY lanes, assert the data lane
 * reset, and program clock distribution / clock channel usage in DPIO
 * before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	/* PCS23 block only matters when more than 2 lanes are in use. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3160
/*
 * CHV post_pll_disable hook: undo the clock distribution forcing done in
 * chv_dp_pre_pll_enable() and drop the PHY lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3193
a4fc5ed6 3194/*
df0c237d
JB
3195 * Native read with retry for link status and receiver capability reads for
3196 * cases where the sink may still be asleep.
9d1a1031
JN
3197 *
3198 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3199 * supposed to retry 3 times per the spec.
a4fc5ed6 3200 */
9d1a1031
JN
3201static ssize_t
3202intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3203 void *buffer, size_t size)
a4fc5ed6 3204{
9d1a1031
JN
3205 ssize_t ret;
3206 int i;
61da5fab 3207
f6a19066
VS
3208 /*
3209 * Sometime we just get the same incorrect byte repeated
3210 * over the entire buffer. Doing just one throw away read
3211 * initially seems to "solve" it.
3212 */
3213 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3214
61da5fab 3215 for (i = 0; i < 3; i++) {
9d1a1031
JN
3216 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3217 if (ret == size)
3218 return ret;
61da5fab
JB
3219 msleep(1);
3220 }
a4fc5ed6 3221
9d1a1031 3222 return ret;
a4fc5ed6
KP
3223}
3224
3225/*
3226 * Fetch AUX CH registers 0x202 - 0x207 which contain
3227 * link status information
3228 */
94223d04 3229bool
93f62dad 3230intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3231{
9d1a1031
JN
3232 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3233 DP_LANE0_1_STATUS,
3234 link_status,
3235 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3236}
3237
/* These are source-specific values. */
/*
 * Return the maximum voltage-swing level this platform's source supports
 * for the given port. Order of the checks matters: Broxton must be
 * handled before the generic gen >= 9 branch.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* Low-vswing eDP panels (port A) may use the higher level. */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3261
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage-swing level; higher swings allow less pre-emphasis.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3329
/*
 * Translate the sink-requested voltage swing / pre-emphasis (train_set[0])
 * into VLV DPIO register values and program them. Returns 0 on success or
 * when the requested combination is invalid (invalid combos are ignored).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Outer switch: pre-emphasis level; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Push the selected values to the PHY via sideband. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3429
67fa24b4
VS
3430static bool chv_need_uniq_trans_scale(uint8_t train_set)
3431{
3432 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3433 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3434}
3435
/*
 * Translate the sink-requested voltage swing / pre-emphasis (train_set[0])
 * into CHV PHY deemphasis/margin values and program them per lane.
 * Returns 0 on success or for invalid swing/pre-emphasis combinations.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Outer switch: pre-emphasis level; inner: voltage swing. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* PCS23 only carries lanes 2/3; skip it for 1-2 lane configs. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3603
a4fc5ed6 3604static uint32_t
5829975c 3605gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3606{
3cf2efb1 3607 uint32_t signal_levels = 0;
a4fc5ed6 3608
3cf2efb1 3609 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3611 default:
3612 signal_levels |= DP_VOLTAGE_0_4;
3613 break;
bd60018a 3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3615 signal_levels |= DP_VOLTAGE_0_6;
3616 break;
bd60018a 3617 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3618 signal_levels |= DP_VOLTAGE_0_8;
3619 break;
bd60018a 3620 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3621 signal_levels |= DP_VOLTAGE_1_2;
3622 break;
3623 }
3cf2efb1 3624 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3625 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3626 default:
3627 signal_levels |= DP_PRE_EMPHASIS_0;
3628 break;
bd60018a 3629 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3630 signal_levels |= DP_PRE_EMPHASIS_3_5;
3631 break;
bd60018a 3632 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3633 signal_levels |= DP_PRE_EMPHASIS_6;
3634 break;
bd60018a 3635 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3636 signal_levels |= DP_PRE_EMPHASIS_9_5;
3637 break;
3638 }
3639 return signal_levels;
3640}
3641
e3421a18
ZW
3642/* Gen6's DP voltage swing and pre-emphasis control */
3643static uint32_t
5829975c 3644gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3645{
3c5a62b5
YL
3646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3647 DP_TRAIN_PRE_EMPHASIS_MASK);
3648 switch (signal_levels) {
bd60018a
SJ
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3651 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3653 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3654 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3656 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3657 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3658 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3659 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3661 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3662 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3663 default:
3c5a62b5
YL
3664 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3665 "0x%x\n", signal_levels);
3666 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3667 }
3668}
3669
1a2eb460
KP
3670/* Gen7's DP voltage swing and pre-emphasis control */
3671static uint32_t
5829975c 3672gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3673{
3674 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3675 DP_TRAIN_PRE_EMPHASIS_MASK);
3676 switch (signal_levels) {
bd60018a 3677 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3678 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3680 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3682 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3683
bd60018a 3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3685 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3686 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3687 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3688
bd60018a 3689 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3690 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3691 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3692 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3693
3694 default:
3695 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3696 "0x%x\n", signal_levels);
3697 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3698 }
3699}
3700
/*
 * Compute the platform-specific voltage-swing/pre-emphasis bits for the
 * current train_set[0] and write them into the port register (cached in
 * intel_dp->DP).  Only the bits covered by 'mask' are replaced.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	/* Note: CHV must be tested before VLV, and the PORT_A eDP cases
	 * before the generic gen4 fallback. */
	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* Broxton clears signal_levels and leaves mask at 0, so the
		 * register write below does not touch any emphasis bits. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/* CHV/VLV program levels via DPIO; nothing to merge here. */
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Replace only the masked bits in the cached register value. */
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	/* Posting read flushes the write to the hardware. */
	POSTING_READ(intel_dp->output_reg);
}
3747
/*
 * Update the cached port register value with the requested link training
 * pattern (via the platform hook) and write it out to the hardware.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Mutates intel_dp->DP in place to carry the new pattern bits. */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3761
/*
 * Switch a DDI port to transmitting the idle pattern after link training,
 * and (except on PORT_A) wait for the hardware to confirm it.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	/* DP_TP_CTL only exists on DDI (HSW+) platforms. */
	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3792
/*
 * Bring the (non-DDI) DP link down: switch to the idle pattern, disable
 * the port, and apply the IBX transcoder-A workaround when needed.
 * Updates intel_dp->DP with the final (disabled) register value.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms tear the link down through a different path. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* The port must actually be enabled before we can bring it down. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First, drop back to the idle training pattern (field layout
	 * differs between CPT/gen7-PORT_A, CHV and the rest). */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then disable the port (and audio) for real. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Let a vblank pass before re-enabling underrun reporting. */
		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Respect the panel's power-down delay before anyone touches it. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3861
/*
 * Read and cache the sink's DPCD receiver capabilities, plus PSR caps,
 * eDP intermediate link rates and downstream port info.
 *
 * Returns false if the AUX transfer fails, no DPCD is present, or the
 * downstream port info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		/* Best effort: a failed read leaves psr_dpcd zeroed. */
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sources. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3951
0d198328
AJ
3952static void
3953intel_dp_probe_oui(struct intel_dp *intel_dp)
3954{
3955 u8 buf[3];
3956
3957 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3958 return;
3959
9d1a1031 3960 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3961 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3962 buf[0], buf[1], buf[2]);
3963
9d1a1031 3964 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3965 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3966 buf[0], buf[1], buf[2]);
3967}
3968
0e32b39c
DA
3969static bool
3970intel_dp_probe_mst(struct intel_dp *intel_dp)
3971{
3972 u8 buf[1];
3973
3974 if (!intel_dp->can_mst)
3975 return false;
3976
3977 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3978 return false;
3979
0e32b39c
DA
3980 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3981 if (buf[0] & DP_MST_CAP) {
3982 DRM_DEBUG_KMS("Sink is MST capable\n");
3983 intel_dp->is_mst = true;
3984 } else {
3985 DRM_DEBUG_KMS("Sink is not MST capable\n");
3986 intel_dp->is_mst = false;
3987 }
3988 }
0e32b39c
DA
3989
3990 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3991 return intel_dp->is_mst;
3992}
3993
/*
 * Stop sink CRC calculation and wait (up to 10 vblanks) for the sink's
 * CRC count to drain to zero.  Re-enables IPS on the way out in all
 * cases (it is disabled by intel_dp_sink_crc_start()).
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never reached zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the START bit while preserving the other TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink reports a zero CRC count. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4037
/*
 * Start sink CRC calculation: verify the sink supports it, stop any
 * calculation already running, disable IPS (it would perturb the CRC)
 * and set DP_TEST_SINK_START.
 *
 * Returns 0 on success, -EIO on AUX failure, -ENOTTY if the sink does
 * not support CRC calculation.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A calculation may still be running from a previous user; stop it
	 * cleanly first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable on failure. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink a frame before CRCs are read. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4072
/*
 * Fetch a 6-byte sink CRC into @crc.  Starts the sink's CRC engine,
 * waits up to 6 vblanks for a non-zero CRC count, reads the CRC
 * registers, and always stops the engine again before returning.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink has computed at least one CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Success path falls through here too: always stop the CRC engine. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4113
a60f0e38
JB
4114static bool
4115intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4116{
9d1a1031
JN
4117 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4118 DP_DEVICE_SERVICE_IRQ_VECTOR,
4119 sink_irq_vector, 1) == 1;
a60f0e38
JB
4120}
4121
0e32b39c
DA
4122static bool
4123intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4124{
4125 int ret;
4126
4127 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4128 DP_SINK_COUNT_ESI,
4129 sink_irq_vector, 14);
4130 if (ret != 14)
4131 return false;
4132
4133 return true;
4134}
4135
c5d5ab7a
TP
4136static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4137{
4138 uint8_t test_result = DP_TEST_ACK;
4139 return test_result;
4140}
4141
4142static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4143{
4144 uint8_t test_result = DP_TEST_NAK;
4145 return test_result;
4146}
4147
4148static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4149{
c5d5ab7a 4150 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4151 struct intel_connector *intel_connector = intel_dp->attached_connector;
4152 struct drm_connector *connector = &intel_connector->base;
4153
4154 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4155 connector->edid_corrupt ||
559be30c
TP
4156 intel_dp->aux.i2c_defer_count > 6) {
4157 /* Check EDID read for NACKs, DEFERs and corruption
4158 * (DP CTS 1.2 Core r1.1)
4159 * 4.2.2.4 : Failed EDID read, I2C_NAK
4160 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4161 * 4.2.2.6 : EDID corruption detected
4162 * Use failsafe mode for all cases
4163 */
4164 if (intel_dp->aux.i2c_nack_count > 0 ||
4165 intel_dp->aux.i2c_defer_count > 0)
4166 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4167 intel_dp->aux.i2c_nack_count,
4168 intel_dp->aux.i2c_defer_count);
4169 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4170 } else {
f79b468e
TS
4171 struct edid *block = intel_connector->detect_edid;
4172
4173 /* We have to write the checksum
4174 * of the last block read
4175 */
4176 block += intel_connector->detect_edid->extensions;
4177
559be30c
TP
4178 if (!drm_dp_dpcd_write(&intel_dp->aux,
4179 DP_TEST_EDID_CHECKSUM,
f79b468e 4180 &block->checksum,
5a1cc655 4181 1))
559be30c
TP
4182 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4183
4184 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4185 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4186 }
4187
4188 /* Set test active flag here so userspace doesn't interrupt things */
4189 intel_dp->compliance_test_active = 1;
4190
c5d5ab7a
TP
4191 return test_result;
4192}
4193
4194static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4195{
c5d5ab7a
TP
4196 uint8_t test_result = DP_TEST_NAK;
4197 return test_result;
4198}
4199
/*
 * Service a DP_TEST_REQUEST raised by the sink (DP compliance testing):
 * dispatch to the matching autotest handler and write the resulting
 * ACK/NAK back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		/* Still NAK the request below so the sink isn't left hanging. */
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4245
/*
 * Handle an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ is lost, forward the event to the MST topology
 * manager, ack the handled bits, and loop while new events keep
 * arriving.  Falls back to disabling MST if the ESI can't be read.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the
				 * 3-byte write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4302
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called with the connection mutex held; re-checks link health after a
 * short HPD pulse and retrains if channel EQ is lost or a compliance
 * link-training test was requested.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to do unless the encoder drives an active crtc. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
a4fc5ed6 4368
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connection status from the DPCD: native sinks are connected
 * once the DPCD reads back; branch devices are judged by SINK_COUNT
 * (when HPD-aware), a DDC probe, or declared unknown for port types
 * that can't be probed reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type is known. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4418
d410b56d
CW
4419static enum drm_connector_status
4420edp_detect(struct intel_dp *intel_dp)
4421{
4422 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4423 enum drm_connector_status status;
4424
4425 status = intel_panel_detect(dev);
4426 if (status == connector_status_unknown)
4427 status = connector_status_connected;
4428
4429 return status;
4430}
4431
b93433cc
JN
4432static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4433 struct intel_digital_port *port)
5eb08b69 4434{
b93433cc 4435 u32 bit;
01cb9ea6 4436
0df53b77
JN
4437 switch (port->port) {
4438 case PORT_A:
4439 return true;
4440 case PORT_B:
4441 bit = SDE_PORTB_HOTPLUG;
4442 break;
4443 case PORT_C:
4444 bit = SDE_PORTC_HOTPLUG;
4445 break;
4446 case PORT_D:
4447 bit = SDE_PORTD_HOTPLUG;
4448 break;
4449 default:
4450 MISSING_CASE(port->port);
4451 return false;
4452 }
4453
4454 return I915_READ(SDEISR) & bit;
4455}
4456
4457static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4458 struct intel_digital_port *port)
4459{
4460 u32 bit;
4461
4462 switch (port->port) {
4463 case PORT_A:
4464 return true;
4465 case PORT_B:
4466 bit = SDE_PORTB_HOTPLUG_CPT;
4467 break;
4468 case PORT_C:
4469 bit = SDE_PORTC_HOTPLUG_CPT;
4470 break;
4471 case PORT_D:
4472 bit = SDE_PORTD_HOTPLUG_CPT;
4473 break;
a78695d3
JN
4474 case PORT_E:
4475 bit = SDE_PORTE_HOTPLUG_SPT;
4476 break;
0df53b77
JN
4477 default:
4478 MISSING_CASE(port->port);
4479 return false;
b93433cc 4480 }
1b469639 4481
b93433cc 4482 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4483}
4484
7e66bcf2 4485static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4486 struct intel_digital_port *port)
a4fc5ed6 4487{
9642c81c 4488 u32 bit;
5eb08b69 4489
9642c81c
JN
4490 switch (port->port) {
4491 case PORT_B:
4492 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4493 break;
4494 case PORT_C:
4495 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4496 break;
4497 case PORT_D:
4498 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4499 break;
4500 default:
4501 MISSING_CASE(port->port);
4502 return false;
4503 }
4504
4505 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4506}
4507
/* GM45: like g4x but the live-status bits sit at different positions,
 * hence the dedicated _GM45 bit definitions. */
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
					struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4530
/* Broxton: live hotplug state from the north display engine (GEN8_DE_PORT_ISR).
 * The port is derived from the encoder's HPD pin rather than taken from
 * intel_dig_port->port directly. */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4556
/*
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Dispatches to the platform-specific live-status helper. Note the order:
 * IBX must be checked before the generic HAS_PCH_SPLIT() test, since IBX
 * also satisfies HAS_PCH_SPLIT().
 *
 * Return %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *port)
{
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_GM45(dev_priv))
		return gm45_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4578
/* Return a freshly-allocated copy of the sink's EDID, or NULL.
 * Prefers the EDID cached on the connector (e.g. VBT/eDP fixed EDID);
 * otherwise reads it over the AUX DDC channel. Caller owns the result. */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid (cached as an ERR_PTR) */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
8c241fef 4595
/* Fetch the sink EDID, stash it in detect_edid, and derive has_audio
 * from it (unless the user forced audio on/off via the property). */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
	else
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
}
4610
/* Drop the cached detect-time EDID and reset the derived audio state.
 * Counterpart of intel_dp_set_edid(). */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
d6f24d0f 4621
/* .detect() hook: figure out whether a sink is present on this connector.
 * Takes an AUX power-domain reference for the duration of the probe,
 * refreshes the cached EDID, handles MST probing, compliance-test state
 * and pending sink IRQs. */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Sink is gone: forget any in-progress compliance test */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4709
/* .force() hook: re-read the EDID for a connector already known to be
 * connected, without performing a full detect cycle. Wraps the AUX
 * access in the port's AUX power domain. */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4735
/* .get_modes() hook: populate the connector's mode list from the EDID
 * cached at detect time; for eDP panels without an EDID, fall back to
 * the panel's fixed mode. Returns the number of modes added. */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
4763
1aad7ac0
CW
4764static bool
4765intel_dp_detect_audio(struct drm_connector *connector)
4766{
1aad7ac0 4767 bool has_audio = false;
beb60608 4768 struct edid *edid;
1aad7ac0 4769
beb60608
CW
4770 edid = to_intel_connector(connector)->detect_edid;
4771 if (edid)
1aad7ac0 4772 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4773
1aad7ac0
CW
4774 return has_audio;
4775}
4776
/* .set_property() hook: handle the force-audio, broadcast-RGB and (for
 * eDP) panel-scaling properties. A successful change that affects the
 * pipe configuration triggers a modeset restore on the attached CRTC. */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change — skip the modeset restore */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4864
/* .destroy() hook: free the detect-time and cached EDIDs, tear down the
 * panel state for eDP, and release the connector itself. */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4883
/* Encoder .destroy() hook: tear down AUX and MST state; for eDP, cancel
 * the delayed VDD-off work and force VDD off under the pps lock before
 * freeing the digital port. */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_aux_fini(intel_dp);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4909
/* Suspend hook: for eDP, make sure panel VDD is really off (the delayed
 * VDD-off work may still be pending) before the system suspends. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4926
/* If the BIOS left panel VDD enabled at boot/resume, adopt that state:
 * take the matching power-domain reference and schedule a delayed VDD
 * off so the reference is not held forever. Caller holds pps_mutex. */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4951
/* Encoder .reset() hook (resume path): re-read the power sequencer
 * assignment (VLV/CHV only) and sanitize any BIOS-enabled panel VDD.
 * Only relevant for eDP encoders. */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4974
/* Connector vtable for DP/eDP connectors (atomic helpers for dpms and
 * state handling, driver-specific detect/force/property hooks). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4986
/* Connector probe helpers: mode enumeration and validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4992
/* Encoder vtable for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4997
/* Hotplug IRQ handler for a DP digital port.
 *
 * Long pulses mean connect/disconnect: re-read DPCD, re-probe MST, and
 * retrain if needed. Short pulses are sink IRQs: service MST events or
 * check link status. Long pulses on eDP are ignored to avoid the
 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." loop.
 * Any failure path drops out of MST mode. Runs with the port's AUX
 * power domain held. */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp)) {
			/* Not an MST sink: check/retrain the SST link */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5078
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/* VBT DVO port codes corresponding to each DP port */
	static const short port_mapping[] = {
		[PORT_B] = DVO_PORT_DPB,
		[PORT_C] = DVO_PORT_DPC,
		[PORT_D] = DVO_PORT_DPD,
		[PORT_E] = DVO_PORT_DPE,
	};

	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		return false;

	/* Port A is always eDP on platforms that have it */
	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	/* Scan the VBT child devices for an eDP entry on this port */
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5115
/* Attach the standard DP connector properties (force-audio and
 * broadcast-RGB), plus the scaling-mode property for eDP panels
 * (defaulting to aspect-preserving scaling). */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5134
/* Seed the panel power-sequencing timestamps so the very first panel
 * power-on/backlight operations honour the required delays. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5141
/* Compute the eDP panel power-sequencing delays (T1/T3, T8, T9, T10,
 * T11/T12) and cache them in intel_dp->pps_delays.
 *
 * For each delay, take the maximum of what the hardware registers and
 * the VBT specify; if both are zero, fall back to the eDP spec limits.
 * The raw values are in 100us units; the derived intel_dp->*_delay
 * fields are converted to milliseconds. Caller holds pps_mutex. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT keeps the power-cycle delay in the control register,
		 * 1-based in 1ms units */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

/* Convert from 100us units (rounding up) to ms */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5263}
5264
5265static void
5266intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5267 struct intel_dp *intel_dp)
f30d26e4
JN
5268{
5269 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5270 u32 pp_on, pp_off, pp_div, port_sel = 0;
5271 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5272 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5273 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5274 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5275
e39b999a 5276 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5277
b0a08bec
VK
5278 if (IS_BROXTON(dev)) {
5279 /*
5280 * TODO: BXT has 2 sets of PPS registers.
5281 * Correct Register for Broxton need to be identified
5282 * using VBT. hardcoding for now
5283 */
5284 pp_ctrl_reg = BXT_PP_CONTROL(0);
5285 pp_on_reg = BXT_PP_ON_DELAYS(0);
5286 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5287
5288 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5289 pp_on_reg = PCH_PP_ON_DELAYS;
5290 pp_off_reg = PCH_PP_OFF_DELAYS;
5291 pp_div_reg = PCH_PP_DIVISOR;
5292 } else {
bf13e81b
JN
5293 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5294
5295 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5296 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5297 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5298 }
5299
b2f19d1a
PZ
5300 /*
5301 * And finally store the new values in the power sequencer. The
5302 * backlight delays are set to 1 because we do manual waits on them. For
5303 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5304 * we'll end up waiting for the backlight off delay twice: once when we
5305 * do the manual sleep, and once when we disable the panel and wait for
5306 * the PP_STATUS bit to become zero.
5307 */
f30d26e4 5308 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5309 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5310 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5311 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5312 /* Compute the divisor for the pp clock, simply match the Bspec
5313 * formula. */
b0a08bec
VK
5314 if (IS_BROXTON(dev)) {
5315 pp_div = I915_READ(pp_ctrl_reg);
5316 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5317 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5318 << BXT_POWER_CYCLE_DELAY_SHIFT);
5319 } else {
5320 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5321 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5322 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5323 }
67a54566
DV
5324
5325 /* Haswell doesn't have any port selection bits for the panel
5326 * power sequencer any more. */
666a4537 5327 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5328 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5329 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5330 if (port == PORT_A)
a24c144c 5331 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5332 else
a24c144c 5333 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5334 }
5335
453c5420
JB
5336 pp_on |= port_sel;
5337
5338 I915_WRITE(pp_on_reg, pp_on);
5339 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5340 if (IS_BROXTON(dev))
5341 I915_WRITE(pp_ctrl_reg, pp_div);
5342 else
5343 I915_WRITE(pp_div_reg, pp_div);
67a54566 5344
67a54566 5345 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5346 I915_READ(pp_on_reg),
5347 I915_READ(pp_off_reg),
b0a08bec
VK
5348 IS_BROXTON(dev) ?
5349 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5350 I915_READ(pp_div_reg));
f684960e
CW
5351}
5352
b33a2815
VK
5353/**
5354 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5355 * @dev: DRM device
5356 * @refresh_rate: RR to be programmed
5357 *
5358 * This function gets called when refresh rate (RR) has to be changed from
5359 * one frequency to another. Switches can be between high and low RR
5360 * supported by the panel or to any other RR based on media playback (in
5361 * this case, RR value needs to be passed from user space).
5362 *
5363 * The caller of this function needs to take a lock on dev_priv->drrs.
5364 */
96178eeb 5365static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5366{
5367 struct drm_i915_private *dev_priv = dev->dev_private;
5368 struct intel_encoder *encoder;
96178eeb
VK
5369 struct intel_digital_port *dig_port = NULL;
5370 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5371 struct intel_crtc_state *config = NULL;
439d7ac0 5372 struct intel_crtc *intel_crtc = NULL;
96178eeb 5373 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5374
5375 if (refresh_rate <= 0) {
5376 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5377 return;
5378 }
5379
96178eeb
VK
5380 if (intel_dp == NULL) {
5381 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5382 return;
5383 }
5384
1fcc9d1c 5385 /*
e4d59f6b
RV
5386 * FIXME: This needs proper synchronization with psr state for some
5387 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5388 */
439d7ac0 5389
96178eeb
VK
5390 dig_port = dp_to_dig_port(intel_dp);
5391 encoder = &dig_port->base;
723f9aab 5392 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5393
5394 if (!intel_crtc) {
5395 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5396 return;
5397 }
5398
6e3c9717 5399 config = intel_crtc->config;
439d7ac0 5400
96178eeb 5401 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5402 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5403 return;
5404 }
5405
96178eeb
VK
5406 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5407 refresh_rate)
439d7ac0
PB
5408 index = DRRS_LOW_RR;
5409
96178eeb 5410 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5411 DRM_DEBUG_KMS(
5412 "DRRS requested for previously set RR...ignoring\n");
5413 return;
5414 }
5415
5416 if (!intel_crtc->active) {
5417 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5418 return;
5419 }
5420
44395bfe 5421 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5422 switch (index) {
5423 case DRRS_HIGH_RR:
5424 intel_dp_set_m_n(intel_crtc, M1_N1);
5425 break;
5426 case DRRS_LOW_RR:
5427 intel_dp_set_m_n(intel_crtc, M2_N2);
5428 break;
5429 case DRRS_MAX_RR:
5430 default:
5431 DRM_ERROR("Unsupported refreshrate type\n");
5432 }
5433 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5434 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5435 u32 val;
a4c30b1d 5436
649636ef 5437 val = I915_READ(reg);
439d7ac0 5438 if (index > DRRS_HIGH_RR) {
666a4537 5439 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5440 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5441 else
5442 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5443 } else {
666a4537 5444 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5445 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5446 else
5447 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5448 }
5449 I915_WRITE(reg, val);
5450 }
5451
4e9ac947
VK
5452 dev_priv->drrs.refresh_rate_type = index;
5453
5454 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5455}
5456
b33a2815
VK
5457/**
5458 * intel_edp_drrs_enable - init drrs struct if supported
5459 * @intel_dp: DP struct
5460 *
5461 * Initializes frontbuffer_bits and drrs.dp
5462 */
c395578e
VK
5463void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5464{
5465 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5466 struct drm_i915_private *dev_priv = dev->dev_private;
5467 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5468 struct drm_crtc *crtc = dig_port->base.base.crtc;
5469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5470
5471 if (!intel_crtc->config->has_drrs) {
5472 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5473 return;
5474 }
5475
5476 mutex_lock(&dev_priv->drrs.mutex);
5477 if (WARN_ON(dev_priv->drrs.dp)) {
5478 DRM_ERROR("DRRS already enabled\n");
5479 goto unlock;
5480 }
5481
5482 dev_priv->drrs.busy_frontbuffer_bits = 0;
5483
5484 dev_priv->drrs.dp = intel_dp;
5485
5486unlock:
5487 mutex_unlock(&dev_priv->drrs.mutex);
5488}
5489
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the panel's fixed (high) refresh rate if we are currently
 * downclocked, clears drrs.dp and cancels any pending idleness work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        if (!intel_crtc->config->has_drrs)
                return;

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                /* DRRS was never enabled (or is already disabled). */
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        /* If we are in the low-RR state, go back to the fixed mode first. */
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                        intel_dp->attached_connector->panel.
                        fixed_mode->vrefresh);

        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);

        /*
         * Cancel after dropping the mutex: the downclock work item takes
         * drrs.mutex itself, so a _sync cancel under the lock could deadlock.
         */
        cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5522
4e9ac947
VK
5523static void intel_edp_drrs_downclock_work(struct work_struct *work)
5524{
5525 struct drm_i915_private *dev_priv =
5526 container_of(work, typeof(*dev_priv), drrs.work.work);
5527 struct intel_dp *intel_dp;
5528
5529 mutex_lock(&dev_priv->drrs.mutex);
5530
5531 intel_dp = dev_priv->drrs.dp;
5532
5533 if (!intel_dp)
5534 goto unlock;
5535
439d7ac0 5536 /*
4e9ac947
VK
5537 * The delayed work can race with an invalidate hence we need to
5538 * recheck.
439d7ac0
PB
5539 */
5540
4e9ac947
VK
5541 if (dev_priv->drrs.busy_frontbuffer_bits)
5542 goto unlock;
439d7ac0 5543
4e9ac947
VK
5544 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5545 intel_dp_set_drrs_state(dev_priv->dev,
5546 intel_dp->attached_connector->panel.
5547 downclock_mode->vrefresh);
439d7ac0 5548
4e9ac947 5549unlock:
4e9ac947 5550 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5551}
5552
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* Non-sync cancel: if the work is already running it rechecks
         * busy_frontbuffer_bits under drrs.mutex anyway. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Only bits belonging to the DRRS pipe are relevant. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                                dev_priv->drrs.dp->attached_connector->panel.
                                fixed_mode->vrefresh);

        mutex_unlock(&dev_priv->drrs.mutex);
}
5595
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
                unsigned frontbuffer_bits)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        enum pipe pipe;

        if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
                return;

        /* Restart the idleness timer from scratch for this flush. */
        cancel_delayed_work(&dev_priv->drrs.work);

        mutex_lock(&dev_priv->drrs.mutex);
        if (!dev_priv->drrs.dp) {
                mutex_unlock(&dev_priv->drrs.mutex);
                return;
        }

        crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
        pipe = to_intel_crtc(crtc)->pipe;

        /* Only bits belonging to the DRRS pipe are relevant; clear them. */
        frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
        dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
                intel_dp_set_drrs_state(dev_priv->dev,
                                dev_priv->drrs.dp->attached_connector->panel.
                                fixed_mode->vrefresh);

        /*
         * flush also means no more activity hence schedule downclock, if all
         * other fbs are quiescent too
         */
        if (!dev_priv->drrs.busy_frontbuffer_bits)
                schedule_delayed_work(&dev_priv->drrs.work,
                                msecs_to_jiffies(1000));
        mutex_unlock(&dev_priv->drrs.mutex);
}
5647
b33a2815
VK
5648/**
5649 * DOC: Display Refresh Rate Switching (DRRS)
5650 *
5651 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5653 * dynamically, based on the usage scenario. This feature is applicable
5654 * for internal panels.
5655 *
5656 * Indication that the panel supports DRRS is given by the panel EDID, which
5657 * would list multiple refresh rates for one resolution.
5658 *
5659 * DRRS is of 2 types - static and seamless.
5660 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5661 * (may appear as a blink on screen) and is used in dock-undock scenario.
5662 * Seamless DRRS involves changing RR without any visual effect to the user
5663 * and can be used during normal system usage. This is done by programming
5664 * certain registers.
5665 *
5666 * Support for static/seamless DRRS may be indicated in the VBT based on
5667 * inputs from the panel spec.
5668 *
5669 * DRRS saves power by switching to low RR based on usage scenarios.
5670 *
5671 * eDP DRRS:-
5672 * The implementation is based on frontbuffer tracking implementation.
5673 * When there is a disturbance on the screen triggered by user activity or a
5674 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5675 * When there is no movement on screen, after a timeout of 1 second, a switch
5676 * to low RR is made.
5677 * For integration with frontbuffer tracking code,
5678 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5679 *
5680 * DRRS can be further extended to support other internal panels and also
5681 * the scenario of video playback wherein RR is set based on the rate
5682 * requested by userspace.
5683 */
5684
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
                struct drm_display_mode *fixed_mode)
{
        struct drm_connector *connector = &intel_connector->base;
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *downclock_mode = NULL;

        /* Work and mutex are set up unconditionally, before any of the
         * "not supported" early returns below, so other DRRS entry points
         * can always safely take the mutex. */
        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
        mutex_init(&dev_priv->drrs.mutex);

        if (INTEL_INFO(dev)->gen <= 6) {
                DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
                return NULL;
        }

        if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
                return NULL;
        }

        downclock_mode = intel_find_panel_downclock
                                        (dev, fixed_mode, connector);

        if (!downclock_mode) {
                DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
                return NULL;
        }

        dev_priv->drrs.type = dev_priv->vbt.drrs_type;

        dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
        DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
        return downclock_mode;
}
5734
/*
 * Finish connector setup for eDP: sanitize VDD state, cache DPCD/EDID,
 * pick the panel's fixed mode (EDID preferred, VBT fallback), set up DRRS
 * and backlight.  Returns false if the panel looks like a "ghost" (no
 * DPCD), true otherwise.  A no-op (returning true) for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum pipe pipe = INVALID_PIPE;

        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        /* Unusable EDID: cache an error pointer instead. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode)
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
5841
/*
 * Initialize the DP connector attached to @intel_dig_port: selects the
 * per-platform AUX vfuncs, registers the DRM connector, assigns the
 * hotplug pin, sets up the panel power sequencer for eDP, AUX and MST,
 * and finally runs the eDP-specific connector init.  Returns false (after
 * unwinding) on any failure.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int type, ret;

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp->pps_pipe = INVALID_PIPE;

        /* intel_dp vfuncs: pick the AUX clock divider per platform. */
        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

        if (HAS_DDI(dev))
                intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_edp(dev, port))
                type = DRM_MODE_CONNECTOR_eDP;
        else
                type = DRM_MODE_CONNECTOR_DisplayPort;

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
                    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                        type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                        port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);

        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_connector_register(connector);

        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
        intel_connector->unregister = intel_dp_connector_unregister;

        /* Set up the hotplug pin. */
        switch (port) {
        case PORT_A:
                intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_B:
                intel_encoder->hpd_pin = HPD_PORT_B;
                /* Early BXT steppings route port B's hotplug to pin A. */
                if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                        intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        case PORT_E:
                intel_encoder->hpd_pin = HPD_PORT_E;
                break;
        default:
                BUG();
        }

        if (is_edp(intel_dp)) {
                /* Panel power sequencer must be set up under pps_lock. */
                pps_lock(intel_dp);
                intel_dp_init_panel_power_timestamps(intel_dp);
                if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                        vlv_initial_power_sequencer_setup(intel_dp);
                else
                        intel_dp_init_panel_power_sequencer(dev, intel_dp);
                pps_unlock(intel_dp);
        }

        ret = intel_dp_aux_init(intel_dp, intel_connector);
        if (ret)
                goto fail;

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd. Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev) && !IS_GM45(dev)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        i915_debugfs_connector_add(connector);

        return true;

fail:
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);
        }
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);

        return false;
}
f0fec3f2
PZ
6005
6006void
f0f59a00
VS
6007intel_dp_init(struct drm_device *dev,
6008 i915_reg_t output_reg, enum port port)
f0fec3f2 6009{
13cf5504 6010 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6011 struct intel_digital_port *intel_dig_port;
6012 struct intel_encoder *intel_encoder;
6013 struct drm_encoder *encoder;
6014 struct intel_connector *intel_connector;
6015
b14c5679 6016 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6017 if (!intel_dig_port)
6018 return;
6019
08d9bc92 6020 intel_connector = intel_connector_alloc();
11aee0f6
SM
6021 if (!intel_connector)
6022 goto err_connector_alloc;
f0fec3f2
PZ
6023
6024 intel_encoder = &intel_dig_port->base;
6025 encoder = &intel_encoder->base;
6026
893da0c9 6027 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
ade1ba73 6028 DRM_MODE_ENCODER_TMDS, NULL))
893da0c9 6029 goto err_encoder_init;
f0fec3f2 6030
5bfe2ac0 6031 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6032 intel_encoder->disable = intel_disable_dp;
00c09d70 6033 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6034 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6035 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6036 if (IS_CHERRYVIEW(dev)) {
9197c88b 6037 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6038 intel_encoder->pre_enable = chv_pre_enable_dp;
6039 intel_encoder->enable = vlv_enable_dp;
580d3811 6040 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6041 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6042 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6043 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6044 intel_encoder->pre_enable = vlv_pre_enable_dp;
6045 intel_encoder->enable = vlv_enable_dp;
49277c31 6046 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6047 } else {
ecff4f3b
JN
6048 intel_encoder->pre_enable = g4x_pre_enable_dp;
6049 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6050 if (INTEL_INFO(dev)->gen >= 5)
6051 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6052 }
f0fec3f2 6053
174edf1f 6054 intel_dig_port->port = port;
0bdf5a05 6055 dev_priv->dig_port_map[port] = intel_encoder;
f0fec3f2 6056 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 6057 intel_dig_port->max_lanes = 4;
f0fec3f2 6058
00c09d70 6059 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6060 if (IS_CHERRYVIEW(dev)) {
6061 if (port == PORT_D)
6062 intel_encoder->crtc_mask = 1 << 2;
6063 else
6064 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6065 } else {
6066 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6067 }
bc079e8b 6068 intel_encoder->cloneable = 0;
f0fec3f2 6069
13cf5504 6070 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6071 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6072
11aee0f6
SM
6073 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6074 goto err_init_connector;
6075
6076 return;
6077
6078err_init_connector:
6079 drm_encoder_cleanup(encoder);
893da0c9 6080err_encoder_init:
11aee0f6
SM
6081 kfree(intel_connector);
6082err_connector_alloc:
6083 kfree(intel_dig_port);
6084
6085 return;
f0fec3f2 6086}
0e32b39c
DA
6087
6088void intel_dp_mst_suspend(struct drm_device *dev)
6089{
6090 struct drm_i915_private *dev_priv = dev->dev_private;
6091 int i;
6092
6093 /* disable MST */
6094 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6095 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6096 if (!intel_dig_port)
6097 continue;
6098
6099 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6100 if (!intel_dig_port->dp.can_mst)
6101 continue;
6102 if (intel_dig_port->dp.is_mst)
6103 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6104 }
6105 }
6106}
6107
6108void intel_dp_mst_resume(struct drm_device *dev)
6109{
6110 struct drm_i915_private *dev_priv = dev->dev_private;
6111 int i;
6112
6113 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6114 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6115 if (!intel_dig_port)
6116 continue;
6117 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6118 int ret;
6119
6120 if (!intel_dig_port->dp.can_mst)
6121 continue;
6122
6123 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6124 if (ret != 0) {
6125 intel_dp_check_mst_status(&intel_dig_port->dp);
6126 }
6127 }
6128 }
6129}