drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates, not the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

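/*
 * A worked example of the two helpers above: a 1920x1080@60 mode has a
 * 148500 kHz pixel clock, so intel_dp_link_required(148500, 18) is
 * 148500 * 18 / 10 == 267300 decakilobits, while a two lane 2.7 GHz
 * link provides intel_dp_max_data_rate(270000, 2) == 432000, which is
 * plenty for that mode.
 */
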
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

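/*
 * The get_aux_clock_divider() variants below return the divider that is
 * programmed into the AUX_CTL register to generate the AUX channel clock
 * from the platform's reference clock (hrawclk, cdclk or the PCH rawclk).
 * intel_dp_aux_ch() keeps calling the hook with an increasing index until
 * it returns 0, so a platform can offer several dividers to retry with.
 */
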
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that.
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug in to the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

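/*
 * intel_dp_aux_ch() below performs one raw AUX transaction: it takes the
 * panel power sequencer lock and VDD, waits for the channel to go idle,
 * loads up to 20 bytes into the five 32-bit data registers, kicks off the
 * transfer with the control value from get_aux_send_ctl(), retries up to
 * five times per clock divider (the DP spec requires at least three
 * attempts), and finally unpacks any reply bytes into the recv buffer.
 */
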
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

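/*
 * The helpers below map a (platform, port) pair to the AUX channel control
 * and data registers: the g4x-style lookups only cover ports B-D, the
 * ILK-style ones split port A (CPU) from the PCH ports, and on SKL the
 * VBT may redirect port E to another port's AUX channel.
 */
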
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}

static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

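/*
 * intel_dp_sink_rates() below prefers the explicit link rate table that
 * eDP 1.4 sinks expose (intel_dp->sink_rates); when that is absent it
 * falls back to default_rates and derives the count from the DPCD
 * DP_MAX_LINK_RATE code (0x06, 0x0a or 0x14 map to 1, 2 or 3 entries
 * via the ">> 3" arithmetic).
 */
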
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;

	/* WaDisableHBR2:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

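/*
 * Both the source and sink rate lists are sorted in ascending order, so
 * the common_rates array produced above is ascending too. That is what
 * lets intel_dp_max_link_rate() below use rate_to_index(0, rates): the
 * array was zero-initialized, so the index of the first 0 is the number
 * of valid entries, and the entry just before it is the highest rate.
 */
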
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

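/*
 * intel_dp_compute_config() below walks candidate configurations from the
 * deepest acceptable color depth downwards: for each bpp it tries every
 * common link rate and lane count (lowest first) until
 * intel_dp_link_required() fits within intel_dp_max_data_rate(), then
 * fills in the m/n values and the per-platform PLL selection.
 */
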
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}

901c2daf
VS
1656void intel_dp_set_link_params(struct intel_dp *intel_dp,
1657 const struct intel_crtc_state *pipe_config)
1658{
1659 intel_dp->link_rate = pipe_config->port_clock;
1660 intel_dp->lane_count = pipe_config->lane_count;
1661}
1662
8ac33ed3 1663static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1664{
b934223d 1665 struct drm_device *dev = encoder->base.dev;
417e822d 1666 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1667 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1668 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1669 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1670 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1671
901c2daf
VS
1672 intel_dp_set_link_params(intel_dp, crtc->config);
1673
417e822d 1674 /*
1a2eb460 1675 * There are four kinds of DP registers:
417e822d
KP
1676 *
1677 * IBX PCH
1a2eb460
KP
1678 * SNB CPU
1679 * IVB CPU
417e822d
KP
1680 * CPT PCH
1681 *
1682 * IBX PCH and CPU are the same for almost everything,
1683 * except that the CPU DP PLL is configured in this
1684 * register
1685 *
1686 * CPT PCH is quite different, having many bits moved
1687 * to the TRANS_DP_CTL register instead. That
1688 * configuration happens (oddly) in ironlake_pch_enable
1689 */
9c9e7927 1690
417e822d
KP
1691 /* Preserve the BIOS-computed detected bit. This is
1692 * supposed to be read-only.
1693 */
1694 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1695
417e822d 1696 /* Handle DP bits in common between all three register formats */
417e822d 1697 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1698 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1699
417e822d 1700 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1701
39e5fa88 1702 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1703 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1704 intel_dp->DP |= DP_SYNC_HS_HIGH;
1705 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1706 intel_dp->DP |= DP_SYNC_VS_HIGH;
1707 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1708
6aba5b6c 1709 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1710 intel_dp->DP |= DP_ENHANCED_FRAMING;
1711
7c62a164 1712 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1713 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1714 u32 trans_dp;
1715
39e5fa88 1716 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1717
1718 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1720 trans_dp |= TRANS_DP_ENH_FRAMING;
1721 else
1722 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1723 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1724 } else {
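		/*
		 * g4x, IBX PCH and VLV/CHV: sync polarity, enhanced framing,
		 * color range and pipe select all live in the DP port
		 * register itself.
		 */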
0f2a2a75 1725 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 1726 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
0f2a2a75 1727 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1728
1729 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1730 intel_dp->DP |= DP_SYNC_HS_HIGH;
1731 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1732 intel_dp->DP |= DP_SYNC_VS_HIGH;
1733 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1734
6aba5b6c 1735 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1736 intel_dp->DP |= DP_ENHANCED_FRAMING;
1737
39e5fa88 1738 if (IS_CHERRYVIEW(dev))
44f37d1f 1739 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1740 else if (crtc->pipe == PIPE_B)
1741 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1742 }
a4fc5ed6
KP
1743}
1744
ffd6749d
PZ
1745#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1746#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1747
1a5ef5b7
PZ
1748#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1749#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1750
ffd6749d
PZ
1751#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1752#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1753
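/*
 * The IDLE_* mask/value pairs above describe the PP_STATUS states that
 * wait_panel_{on,off,power_cycle}() poll for via wait_panel_status().
 */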
4be73780 1754static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1755 u32 mask,
1756 u32 value)
bd943159 1757{
30add22d 1758 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1759 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1760 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 1761
e39b999a
VS
1762 lockdep_assert_held(&dev_priv->pps_mutex);
1763
bf13e81b
JN
1764 pp_stat_reg = _pp_stat_reg(intel_dp);
1765 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1766
99ea7127 1767 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1768 mask, value,
1769 I915_READ(pp_stat_reg),
1770 I915_READ(pp_ctrl_reg));
32ce697c 1771
453c5420 1772 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1773 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1774 I915_READ(pp_stat_reg),
1775 I915_READ(pp_ctrl_reg));
32ce697c 1776 }
54c136d4
CW
1777
1778 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1779}
32ce697c 1780
4be73780 1781static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1782{
1783 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1784 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1785}
1786
4be73780 1787static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1788{
1789 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1790 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1791}
1792
4be73780 1793static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1794{
1795 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1796
 1797 /* When we disable the VDD override bit last, we have to do the
 1798 * manual wait ourselves. */
1799 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1800 intel_dp->panel_power_cycle_delay);
1801
4be73780 1802 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1803}
1804
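/*
 * The two helpers below enforce the remaining backlight on/off delays,
 * measured from the last panel power on / backlight off timestamps.
 */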
4be73780 1805static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1806{
1807 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1808 intel_dp->backlight_on_delay);
1809}
1810
4be73780 1811static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1812{
1813 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1814 intel_dp->backlight_off_delay);
1815}
99ea7127 1816
832dd3c1
KP
1817/* Read the current pp_control value, unlocking the register if it
1818 * is locked
1819 */
1820
453c5420 1821static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1822{
453c5420
JB
1823 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1824 struct drm_i915_private *dev_priv = dev->dev_private;
1825 u32 control;
832dd3c1 1826
e39b999a
VS
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
bf13e81b 1829 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1830 if (!IS_BROXTON(dev)) {
1831 control &= ~PANEL_UNLOCK_MASK;
1832 control |= PANEL_UNLOCK_REGS;
1833 }
832dd3c1 1834 return control;
bd943159
KP
1835}
1836
951468f3
VS
1837/*
1838 * Must be paired with edp_panel_vdd_off().
1839 * Must hold pps_mutex around the whole on/off sequence.
1840 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1841 */
1e0560e0 1842static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1843{
30add22d 1844 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1846 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1847 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1848 enum intel_display_power_domain power_domain;
5d613501 1849 u32 pp;
f0f59a00 1850 i915_reg_t pp_stat_reg, pp_ctrl_reg;
adddaaf4 1851 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1852
e39b999a
VS
1853 lockdep_assert_held(&dev_priv->pps_mutex);
1854
97af61f5 1855 if (!is_edp(intel_dp))
adddaaf4 1856 return false;
bd943159 1857
2c623c11 1858 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1859 intel_dp->want_panel_vdd = true;
99ea7127 1860
4be73780 1861 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1862 return need_to_disable;
b0665d57 1863
25f78f58 1864 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 1865 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1866
3936fcf4
VS
1867 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1868 port_name(intel_dig_port->port));
bd943159 1869
4be73780
DV
1870 if (!edp_have_panel_power(intel_dp))
1871 wait_panel_power_cycle(intel_dp);
99ea7127 1872
453c5420 1873 pp = ironlake_get_pp_control(intel_dp);
5d613501 1874 pp |= EDP_FORCE_VDD;
ebf33b18 1875
bf13e81b
JN
1876 pp_stat_reg = _pp_stat_reg(intel_dp);
1877 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1878
1879 I915_WRITE(pp_ctrl_reg, pp);
1880 POSTING_READ(pp_ctrl_reg);
1881 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1882 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1883 /*
1884 * If the panel wasn't on, delay before accessing aux channel
1885 */
4be73780 1886 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1887 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1888 port_name(intel_dig_port->port));
f01eca2e 1889 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1890 }
adddaaf4
JN
1891
1892 return need_to_disable;
1893}
1894
951468f3
VS
1895/*
1896 * Must be paired with intel_edp_panel_vdd_off() or
1897 * intel_edp_panel_off().
1898 * Nested calls to these functions are not allowed since
1899 * we drop the lock. Caller must use some higher level
1900 * locking to prevent nested calls from other threads.
1901 */
b80d6c78 1902void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1903{
c695b6b6 1904 bool vdd;
adddaaf4 1905
c695b6b6
VS
1906 if (!is_edp(intel_dp))
1907 return;
1908
773538e8 1909 pps_lock(intel_dp);
c695b6b6 1910 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1911 pps_unlock(intel_dp);
c695b6b6 1912
e2c719b7 1913 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1914 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1915}
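/*
 * Hypothetical caller sketch (not taken from this file): DPCD/AUX accesses
 * are typically bracketed as
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	... aux transfers ...
 *	intel_edp_panel_vdd_off(intel_dp, false);
 *
 * so VDD is held for the whole access and released lazily afterwards.
 */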
1916
4be73780 1917static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1918{
30add22d 1919 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1920 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1921 struct intel_digital_port *intel_dig_port =
1922 dp_to_dig_port(intel_dp);
1923 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1924 enum intel_display_power_domain power_domain;
5d613501 1925 u32 pp;
f0f59a00 1926 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1927
e39b999a 1928 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1929
15e899a0 1930 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1931
15e899a0 1932 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1933 return;
b0665d57 1934
3936fcf4
VS
1935 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1936 port_name(intel_dig_port->port));
bd943159 1937
be2c9196
VS
1938 pp = ironlake_get_pp_control(intel_dp);
1939 pp &= ~EDP_FORCE_VDD;
453c5420 1940
be2c9196
VS
1941 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1942 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1943
be2c9196
VS
1944 I915_WRITE(pp_ctrl_reg, pp);
1945 POSTING_READ(pp_ctrl_reg);
90791a5c 1946
be2c9196
VS
1947 /* Make sure sequencer is idle before allowing subsequent activity */
1948 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1949 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1950
be2c9196
VS
1951 if ((pp & POWER_TARGET_ON) == 0)
1952 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1953
25f78f58 1954 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1955 intel_display_power_put(dev_priv, power_domain);
bd943159 1956}
5d613501 1957
4be73780 1958static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1959{
1960 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1961 struct intel_dp, panel_vdd_work);
bd943159 1962
773538e8 1963 pps_lock(intel_dp);
15e899a0
VS
1964 if (!intel_dp->want_panel_vdd)
1965 edp_panel_vdd_off_sync(intel_dp);
773538e8 1966 pps_unlock(intel_dp);
bd943159
KP
1967}
1968
aba86890
ID
1969static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1970{
1971 unsigned long delay;
1972
1973 /*
1974 * Queue the timer to fire a long time from now (relative to the power
1975 * down delay) to keep the panel power up across a sequence of
1976 * operations.
1977 */
1978 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1979 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1980}
1981
951468f3
VS
1982/*
1983 * Must be paired with edp_panel_vdd_on().
1984 * Must hold pps_mutex around the whole on/off sequence.
1985 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1986 */
4be73780 1987static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1988{
e39b999a
VS
1989 struct drm_i915_private *dev_priv =
1990 intel_dp_to_dev(intel_dp)->dev_private;
1991
1992 lockdep_assert_held(&dev_priv->pps_mutex);
1993
97af61f5
KP
1994 if (!is_edp(intel_dp))
1995 return;
5d613501 1996
e2c719b7 1997 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1998 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1999
bd943159
KP
2000 intel_dp->want_panel_vdd = false;
2001
aba86890 2002 if (sync)
4be73780 2003 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2004 else
2005 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2006}
2007
9f0fb5be 2008static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 2009{
30add22d 2010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2011 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 2012 u32 pp;
f0f59a00 2013 i915_reg_t pp_ctrl_reg;
9934c132 2014
9f0fb5be
VS
2015 lockdep_assert_held(&dev_priv->pps_mutex);
2016
97af61f5 2017 if (!is_edp(intel_dp))
bd943159 2018 return;
99ea7127 2019
3936fcf4
VS
2020 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2021 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 2022
e7a89ace
VS
2023 if (WARN(edp_have_panel_power(intel_dp),
2024 "eDP port %c panel power already on\n",
2025 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 2026 return;
9934c132 2027
4be73780 2028 wait_panel_power_cycle(intel_dp);
37c6c9b0 2029
bf13e81b 2030 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2031 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
2032 if (IS_GEN5(dev)) {
2033 /* ILK workaround: disable reset around power sequence */
2034 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
2035 I915_WRITE(pp_ctrl_reg, pp);
2036 POSTING_READ(pp_ctrl_reg);
05ce1a49 2037 }
37c6c9b0 2038
1c0ae80a 2039 pp |= POWER_TARGET_ON;
99ea7127
KP
2040 if (!IS_GEN5(dev))
2041 pp |= PANEL_POWER_RESET;
2042
453c5420
JB
2043 I915_WRITE(pp_ctrl_reg, pp);
2044 POSTING_READ(pp_ctrl_reg);
9934c132 2045
4be73780 2046 wait_panel_on(intel_dp);
dce56b3c 2047 intel_dp->last_power_on = jiffies;
9934c132 2048
05ce1a49
KP
2049 if (IS_GEN5(dev)) {
2050 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2051 I915_WRITE(pp_ctrl_reg, pp);
2052 POSTING_READ(pp_ctrl_reg);
05ce1a49 2053 }
9f0fb5be 2054}
e39b999a 2055
9f0fb5be
VS
2056void intel_edp_panel_on(struct intel_dp *intel_dp)
2057{
2058 if (!is_edp(intel_dp))
2059 return;
2060
2061 pps_lock(intel_dp);
2062 edp_panel_on(intel_dp);
773538e8 2063 pps_unlock(intel_dp);
9934c132
JB
2064}
2065
9f0fb5be
VS
2066
2067static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2068{
4e6e1a54
ID
2069 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2070 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2071 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2072 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2073 enum intel_display_power_domain power_domain;
99ea7127 2074 u32 pp;
f0f59a00 2075 i915_reg_t pp_ctrl_reg;
9934c132 2076
9f0fb5be
VS
2077 lockdep_assert_held(&dev_priv->pps_mutex);
2078
97af61f5
KP
2079 if (!is_edp(intel_dp))
2080 return;
37c6c9b0 2081
3936fcf4
VS
2082 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2083 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2084
3936fcf4
VS
2085 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2086 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2087
453c5420 2088 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2089 /* We need to switch off panel power _and_ force vdd, for otherwise some
2090 * panels get very unhappy and cease to work. */
b3064154
PJ
2091 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2092 EDP_BLC_ENABLE);
453c5420 2093
bf13e81b 2094 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2095
849e39f5
PZ
2096 intel_dp->want_panel_vdd = false;
2097
453c5420
JB
2098 I915_WRITE(pp_ctrl_reg, pp);
2099 POSTING_READ(pp_ctrl_reg);
9934c132 2100
dce56b3c 2101 intel_dp->last_power_cycle = jiffies;
4be73780 2102 wait_panel_off(intel_dp);
849e39f5
PZ
2103
2104 /* We got a reference when we enabled the VDD. */
25f78f58 2105 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 2106 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2107}
e39b999a 2108
9f0fb5be
VS
2109void intel_edp_panel_off(struct intel_dp *intel_dp)
2110{
2111 if (!is_edp(intel_dp))
2112 return;
e39b999a 2113
9f0fb5be
VS
2114 pps_lock(intel_dp);
2115 edp_panel_off(intel_dp);
773538e8 2116 pps_unlock(intel_dp);
9934c132
JB
2117}
2118
1250d107
JN
2119/* Enable backlight in the panel power control. */
2120static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2121{
da63a9f2
PZ
2122 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2123 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2124 struct drm_i915_private *dev_priv = dev->dev_private;
2125 u32 pp;
f0f59a00 2126 i915_reg_t pp_ctrl_reg;
32f9d658 2127
01cb9ea6
JB
2128 /*
2129 * If we enable the backlight right away following a panel power
2130 * on, we may see slight flicker as the panel syncs with the eDP
2131 * link. So delay a bit to make sure the image is solid before
2132 * allowing it to appear.
2133 */
4be73780 2134 wait_backlight_on(intel_dp);
e39b999a 2135
773538e8 2136 pps_lock(intel_dp);
e39b999a 2137
453c5420 2138 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2139 pp |= EDP_BLC_ENABLE;
453c5420 2140
bf13e81b 2141 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2142
2143 I915_WRITE(pp_ctrl_reg, pp);
2144 POSTING_READ(pp_ctrl_reg);
e39b999a 2145
773538e8 2146 pps_unlock(intel_dp);
32f9d658
ZW
2147}
2148
1250d107
JN
2149/* Enable backlight PWM and backlight PP control. */
2150void intel_edp_backlight_on(struct intel_dp *intel_dp)
2151{
2152 if (!is_edp(intel_dp))
2153 return;
2154
2155 DRM_DEBUG_KMS("\n");
2156
2157 intel_panel_enable_backlight(intel_dp->attached_connector);
2158 _intel_edp_backlight_on(intel_dp);
2159}
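/*
 * Note the ordering: the backlight PWM is brought up before the PP control
 * bit on enable, and torn down only after it on disable.
 */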
2160
2161/* Disable backlight in the panel power control. */
2162static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2163{
30add22d 2164 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 pp;
f0f59a00 2167 i915_reg_t pp_ctrl_reg;
32f9d658 2168
f01eca2e
KP
2169 if (!is_edp(intel_dp))
2170 return;
2171
773538e8 2172 pps_lock(intel_dp);
e39b999a 2173
453c5420 2174 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2175 pp &= ~EDP_BLC_ENABLE;
453c5420 2176
bf13e81b 2177 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2178
2179 I915_WRITE(pp_ctrl_reg, pp);
2180 POSTING_READ(pp_ctrl_reg);
f7d2323c 2181
773538e8 2182 pps_unlock(intel_dp);
e39b999a
VS
2183
2184 intel_dp->last_backlight_off = jiffies;
f7d2323c 2185 edp_wait_backlight_off(intel_dp);
1250d107 2186}
f7d2323c 2187
1250d107
JN
2188/* Disable backlight PP control and backlight PWM. */
2189void intel_edp_backlight_off(struct intel_dp *intel_dp)
2190{
2191 if (!is_edp(intel_dp))
2192 return;
2193
2194 DRM_DEBUG_KMS("\n");
f7d2323c 2195
1250d107 2196 _intel_edp_backlight_off(intel_dp);
f7d2323c 2197 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2198}
a4fc5ed6 2199
73580fb7
JN
2200/*
2201 * Hook for controlling the panel power control backlight through the bl_power
2202 * sysfs attribute. Take care to handle multiple calls.
2203 */
2204static void intel_edp_backlight_power(struct intel_connector *connector,
2205 bool enable)
2206{
2207 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2208 bool is_enabled;
2209
773538e8 2210 pps_lock(intel_dp);
e39b999a 2211 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2212 pps_unlock(intel_dp);
73580fb7
JN
2213
2214 if (is_enabled == enable)
2215 return;
2216
23ba9373
JN
2217 DRM_DEBUG_KMS("panel power control backlight %s\n",
2218 enable ? "enable" : "disable");
73580fb7
JN
2219
2220 if (enable)
2221 _intel_edp_backlight_on(intel_dp);
2222 else
2223 _intel_edp_backlight_off(intel_dp);
2224}
2225
64e1077a
VS
2226static const char *state_string(bool enabled)
2227{
2228 return enabled ? "on" : "off";
2229}
2230
2231static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2232{
2233 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2234 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2235 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2236
2237 I915_STATE_WARN(cur_state != state,
2238 "DP port %c state assertion failure (expected %s, current %s)\n",
2239 port_name(dig_port->port),
2240 state_string(state), state_string(cur_state));
2241}
2242#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2243
2244static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2245{
2246 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2247
2248 I915_STATE_WARN(cur_state != state,
2249 "eDP PLL state assertion failure (expected %s, current %s)\n",
2250 state_string(state), state_string(cur_state));
2251}
2252#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2253#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2254
2bd2ad64 2255static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2256{
da63a9f2 2257 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2258 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2259 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2260
64e1077a
VS
2261 assert_pipe_disabled(dev_priv, crtc->pipe);
2262 assert_dp_port_disabled(intel_dp);
2263 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2264
abfce949
VS
2265 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2266 crtc->config->port_clock);
2267
2268 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2269
2270 if (crtc->config->port_clock == 162000)
2271 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2272 else
2273 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2274
2275 I915_WRITE(DP_A, intel_dp->DP);
2276 POSTING_READ(DP_A);
2277 udelay(500);
2278
0767935e 2279 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2280
0767935e 2281 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2282 POSTING_READ(DP_A);
2283 udelay(200);
d240f20f
JB
2284}
2285
2bd2ad64 2286static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2287{
da63a9f2 2288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2289 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2290 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2291
64e1077a
VS
2292 assert_pipe_disabled(dev_priv, crtc->pipe);
2293 assert_dp_port_disabled(intel_dp);
2294 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2295
abfce949
VS
2296 DRM_DEBUG_KMS("disabling eDP PLL\n");
2297
6fec7662 2298 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2299
6fec7662 2300 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2301 POSTING_READ(DP_A);
d240f20f
JB
2302 udelay(200);
2303}
2304
c7ad3810 2305/* If the sink supports it, try to set the power state appropriately */
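/* DP_SET_POWER is DPCD address 0x600; a sink waking from D3 may need up to
 * ~1 ms before it accepts the D0 write, hence the retry loop below. */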
c19b0669 2306void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2307{
2308 int ret, i;
2309
2310 /* Should have a valid DPCD by this point */
2311 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2312 return;
2313
2314 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2315 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2316 DP_SET_POWER_D3);
c7ad3810
JB
2317 } else {
2318 /*
2319 * When turning on, we need to retry for 1ms to give the sink
2320 * time to wake up.
2321 */
2322 for (i = 0; i < 3; i++) {
9d1a1031
JN
2323 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2324 DP_SET_POWER_D0);
c7ad3810
JB
2325 if (ret == 1)
2326 break;
2327 msleep(1);
2328 }
2329 }
f9cac721
JN
2330
2331 if (ret != 1)
2332 DRM_DEBUG_KMS("failed to %s sink power state\n",
2333 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2334}
2335
19d8fe15
DV
2336static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2337 enum pipe *pipe)
d240f20f 2338{
19d8fe15 2339 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2340 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2341 struct drm_device *dev = encoder->base.dev;
2342 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2343 enum intel_display_power_domain power_domain;
2344 u32 tmp;
2345
2346 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2347 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2348 return false;
2349
2350 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2351
2352 if (!(tmp & DP_PORT_EN))
2353 return false;
2354
39e5fa88 2355 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2356 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2357 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2358 enum pipe p;
19d8fe15 2359
adc289d7
VS
2360 for_each_pipe(dev_priv, p) {
2361 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2362 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2363 *pipe = p;
19d8fe15
DV
2364 return true;
2365 }
2366 }
19d8fe15 2367
4a0833ec 2368 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
f0f59a00 2369 i915_mmio_reg_offset(intel_dp->output_reg));
39e5fa88
VS
2370 } else if (IS_CHERRYVIEW(dev)) {
2371 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2372 } else {
2373 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2374 }
d240f20f 2375
19d8fe15
DV
2376 return true;
2377}
d240f20f 2378
045ac3b5 2379static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2380 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2381{
2382 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2383 u32 tmp, flags = 0;
63000ef6
XZ
2384 struct drm_device *dev = encoder->base.dev;
2385 struct drm_i915_private *dev_priv = dev->dev_private;
2386 enum port port = dp_to_dig_port(intel_dp)->port;
2387 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2388 int dotclock;
045ac3b5 2389
9ed109a7 2390 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2391
2392 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2393
39e5fa88 2394 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2395 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2396
2397 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2398 flags |= DRM_MODE_FLAG_PHSYNC;
2399 else
2400 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2401
b81e34c2 2402 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2403 flags |= DRM_MODE_FLAG_PVSYNC;
2404 else
2405 flags |= DRM_MODE_FLAG_NVSYNC;
2406 } else {
39e5fa88 2407 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2408 flags |= DRM_MODE_FLAG_PHSYNC;
2409 else
2410 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2411
39e5fa88 2412 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2413 flags |= DRM_MODE_FLAG_PVSYNC;
2414 else
2415 flags |= DRM_MODE_FLAG_NVSYNC;
2416 }
045ac3b5 2417
2d112de7 2418 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2419
8c875fca 2420 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 2421 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
2422 pipe_config->limited_color_range = true;
2423
eb14cb74
VS
2424 pipe_config->has_dp_encoder = true;
2425
90a6b7b0
VS
2426 pipe_config->lane_count =
2427 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2428
eb14cb74
VS
2429 intel_dp_get_m_n(crtc, pipe_config);
2430
18442d08 2431 if (port == PORT_A) {
b377e0df 2432 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2433 pipe_config->port_clock = 162000;
2434 else
2435 pipe_config->port_clock = 270000;
2436 }
18442d08
VS
2437
2438 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2439 &pipe_config->dp_m_n);
2440
2441 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2442 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2443
2d112de7 2444 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2445
c6cd2ee2
JN
2446 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2447 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2448 /*
2449 * This is a big fat ugly hack.
2450 *
2451 * Some machines in UEFI boot mode provide us a VBT that has 18
2452 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2453 * unknown we fail to light up. Yet the same BIOS boots up with
2454 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2455 * max, not what it tells us to use.
2456 *
2457 * Note: This will still be broken if the eDP panel is not lit
2458 * up by the BIOS, and thus we can't get the mode at module
2459 * load.
2460 */
2461 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2462 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2463 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2464 }
045ac3b5
JB
2465}
2466
e8cb4558 2467static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2468{
e8cb4558 2469 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2470 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2471 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2472
6e3c9717 2473 if (crtc->config->has_audio)
495a5bb8 2474 intel_audio_codec_disable(encoder);
6cb49835 2475
b32c6f48
RV
2476 if (HAS_PSR(dev) && !HAS_DDI(dev))
2477 intel_psr_disable(intel_dp);
2478
6cb49835
DV
2479 /* Make sure the panel is off before trying to change the mode. But also
2480 * ensure that we have vdd while we switch off the panel. */
24f3e092 2481 intel_edp_panel_vdd_on(intel_dp);
4be73780 2482 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2483 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2484 intel_edp_panel_off(intel_dp);
3739850b 2485
08aff3fe
VS
2486 /* disable the port before the pipe on g4x */
2487 if (INTEL_INFO(dev)->gen < 5)
3739850b 2488 intel_dp_link_down(intel_dp);
d240f20f
JB
2489}
2490
08aff3fe 2491static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2492{
2bd2ad64 2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2494 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2495
49277c31 2496 intel_dp_link_down(intel_dp);
abfce949
VS
2497
2498 /* Only ilk+ has port A */
08aff3fe
VS
2499 if (port == PORT_A)
2500 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2501}
2502
2503static void vlv_post_disable_dp(struct intel_encoder *encoder)
2504{
2505 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2506
2507 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2508}
2509
a8f327fb
VS
2510static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2511 bool reset)
580d3811 2512{
a8f327fb
VS
2513 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2514 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2515 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2516 enum pipe pipe = crtc->pipe;
2517 uint32_t val;
580d3811 2518
a8f327fb
VS
2519 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2520 if (reset)
2521 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2522 else
2523 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2524 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2525
a8f327fb
VS
2526 if (crtc->config->lane_count > 2) {
2527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2528 if (reset)
2529 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2530 else
2531 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2532 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2533 }
580d3811 2534
97fd4d5c 2535 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2536 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2537 if (reset)
2538 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2539 else
2540 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2541 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2542
a8f327fb 2543 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2544 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2545 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2546 if (reset)
2547 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2548 else
2549 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2550 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2551 }
a8f327fb 2552}
97fd4d5c 2553
a8f327fb
VS
2554static void chv_post_disable_dp(struct intel_encoder *encoder)
2555{
2556 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2557 struct drm_device *dev = encoder->base.dev;
2558 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2559
a8f327fb
VS
2560 intel_dp_link_down(intel_dp);
2561
2562 mutex_lock(&dev_priv->sb_lock);
2563
2564 /* Assert data lane reset */
2565 chv_data_lane_soft_reset(encoder, true);
580d3811 2566
a580516d 2567 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2568}
2569
7b13b58a
VS
2570static void
2571_intel_dp_set_link_train(struct intel_dp *intel_dp,
2572 uint32_t *DP,
2573 uint8_t dp_train_pat)
2574{
2575 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2576 struct drm_device *dev = intel_dig_port->base.base.dev;
2577 struct drm_i915_private *dev_priv = dev->dev_private;
2578 enum port port = intel_dig_port->port;
2579
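	/*
	 * Mirrors the register layouts described in intel_dp_prepare():
	 * DDI parts use DP_TP_CTL, gen7 port A and CPT PCH ports use the
	 * *_CPT training fields, everything else the plain DP bits.
	 */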
2580 if (HAS_DDI(dev)) {
2581 uint32_t temp = I915_READ(DP_TP_CTL(port));
2582
2583 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2584 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2585 else
2586 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2587
2588 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2589 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2590 case DP_TRAINING_PATTERN_DISABLE:
2591 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2592
2593 break;
2594 case DP_TRAINING_PATTERN_1:
2595 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2596 break;
2597 case DP_TRAINING_PATTERN_2:
2598 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2599 break;
2600 case DP_TRAINING_PATTERN_3:
2601 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2602 break;
2603 }
2604 I915_WRITE(DP_TP_CTL(port), temp);
2605
39e5fa88
VS
2606 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2607 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2608 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2609
2610 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2611 case DP_TRAINING_PATTERN_DISABLE:
2612 *DP |= DP_LINK_TRAIN_OFF_CPT;
2613 break;
2614 case DP_TRAINING_PATTERN_1:
2615 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2616 break;
2617 case DP_TRAINING_PATTERN_2:
2618 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2619 break;
2620 case DP_TRAINING_PATTERN_3:
2621 DRM_ERROR("DP training pattern 3 not supported\n");
2622 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2623 break;
2624 }
2625
2626 } else {
2627 if (IS_CHERRYVIEW(dev))
2628 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2629 else
2630 *DP &= ~DP_LINK_TRAIN_MASK;
2631
2632 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2633 case DP_TRAINING_PATTERN_DISABLE:
2634 *DP |= DP_LINK_TRAIN_OFF;
2635 break;
2636 case DP_TRAINING_PATTERN_1:
2637 *DP |= DP_LINK_TRAIN_PAT_1;
2638 break;
2639 case DP_TRAINING_PATTERN_2:
2640 *DP |= DP_LINK_TRAIN_PAT_2;
2641 break;
2642 case DP_TRAINING_PATTERN_3:
2643 if (IS_CHERRYVIEW(dev)) {
2644 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2645 } else {
2646 DRM_ERROR("DP training pattern 3 not supported\n");
2647 *DP |= DP_LINK_TRAIN_PAT_2;
2648 }
2649 break;
2650 }
2651 }
2652}
2653
2654static void intel_dp_enable_port(struct intel_dp *intel_dp)
2655{
2656 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2657 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2658 struct intel_crtc *crtc =
2659 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2660
7b13b58a
VS
2661 /* enable with pattern 1 (as per spec) */
2662 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2663 DP_TRAINING_PATTERN_1);
2664
2665 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2666 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2667
2668 /*
2669 * Magic for VLV/CHV. We _must_ first set up the register
2670 * without actually enabling the port, and then do another
2671 * write to enable the port. Otherwise link training will
2672 * fail when the power sequencer is freshly used for this port.
2673 */
2674 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2675 if (crtc->config->has_audio)
2676 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2677
2678 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2679 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2680}
2681
e8cb4558 2682static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2683{
e8cb4558
DV
2684 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2685 struct drm_device *dev = encoder->base.dev;
2686 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2687 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2688 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2689 enum port port = dp_to_dig_port(intel_dp)->port;
2690 enum pipe pipe = crtc->pipe;
5d613501 2691
0c33d8d7
DV
2692 if (WARN_ON(dp_reg & DP_PORT_EN))
2693 return;
5d613501 2694
093e3f13
VS
2695 pps_lock(intel_dp);
2696
666a4537 2697 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
093e3f13
VS
2698 vlv_init_panel_power_sequencer(intel_dp);
2699
7864578a
VS
2700 /*
2701 * We get an occasional spurious underrun between the port
2702 * enable and vdd enable, when enabling port A eDP.
2703 *
2704 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2705 */
2706 if (port == PORT_A)
2707 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2708
7b13b58a 2709 intel_dp_enable_port(intel_dp);
093e3f13 2710
d6fbdd15
VS
2711 if (port == PORT_A && IS_GEN5(dev_priv)) {
2712 /*
2713 * Underrun reporting for the other pipe was disabled in
2714 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2715 * enabled, so it's now safe to re-enable underrun reporting.
2716 */
2717 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2718 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2719 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2720 }
2721
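	/*
	 * Take the VDD override so the panel power-up sequence can run,
	 * then drop it again once the panel itself is powered on.
	 */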
093e3f13
VS
2722 edp_panel_vdd_on(intel_dp);
2723 edp_panel_on(intel_dp);
2724 edp_panel_vdd_off(intel_dp, true);
2725
7864578a
VS
2726 if (port == PORT_A)
2727 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2728
093e3f13
VS
2729 pps_unlock(intel_dp);
2730
666a4537 2731 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e0fce78f
VS
2732 unsigned int lane_mask = 0x0;
2733
2734 if (IS_CHERRYVIEW(dev))
2735 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2736
9b6de0a1
VS
2737 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2738 lane_mask);
e0fce78f 2739 }
61234fa5 2740
f01eca2e 2741 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2742 intel_dp_start_link_train(intel_dp);
3ab9c637 2743 intel_dp_stop_link_train(intel_dp);
c1dec79a 2744
6e3c9717 2745 if (crtc->config->has_audio) {
c1dec79a 2746 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2747 pipe_name(pipe));
c1dec79a
JN
2748 intel_audio_codec_enable(encoder);
2749 }
ab1f90f9 2750}
89b667f8 2751
ecff4f3b
JN
2752static void g4x_enable_dp(struct intel_encoder *encoder)
2753{
828f5c6e
JN
2754 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2755
ecff4f3b 2756 intel_enable_dp(encoder);
4be73780 2757 intel_edp_backlight_on(intel_dp);
ab1f90f9 2758}
89b667f8 2759
ab1f90f9
JN
2760static void vlv_enable_dp(struct intel_encoder *encoder)
2761{
828f5c6e
JN
2762 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2763
4be73780 2764 intel_edp_backlight_on(intel_dp);
b32c6f48 2765 intel_psr_enable(intel_dp);
d240f20f
JB
2766}
2767
ecff4f3b 2768static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2769{
d6fbdd15 2770 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2771 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2772 enum port port = dp_to_dig_port(intel_dp)->port;
2773 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2774
8ac33ed3
DV
2775 intel_dp_prepare(encoder);
2776
d6fbdd15
VS
2777 if (port == PORT_A && IS_GEN5(dev_priv)) {
2778 /*
2779 * We get FIFO underruns on the other pipe when
2780 * enabling the CPU eDP PLL, and when enabling CPU
2781 * eDP port. We could potentially avoid the PLL
2782 * underrun with a vblank wait just prior to enabling
2783 * the PLL, but that doesn't appear to help the port
2784 * enable case. Just sweep it all under the rug.
2785 */
2786 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2787 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2788 }
2789
d41f1efb 2790 /* Only ilk+ has port A */
abfce949 2791 if (port == PORT_A)
ab1f90f9
JN
2792 ironlake_edp_pll_on(intel_dp);
2793}
2794
83b84597
VS
2795static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2796{
2797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2798 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2799 enum pipe pipe = intel_dp->pps_pipe;
f0f59a00 2800 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
83b84597
VS
2801
2802 edp_panel_vdd_off_sync(intel_dp);
2803
2804 /*
 2805 * VLV seems to get confused when multiple power sequencers
 2806 * have the same port selected (even if only one has power/vdd
 2807 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2808 * CHV, on the other hand, doesn't seem to mind having the same port
 2809 * selected in multiple power sequencers, but let's always clear the
 2810 * port select when logically disconnecting a power sequencer
2811 * from a port.
2812 */
2813 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2814 pipe_name(pipe), port_name(intel_dig_port->port));
2815 I915_WRITE(pp_on_reg, 0);
2816 POSTING_READ(pp_on_reg);
2817
2818 intel_dp->pps_pipe = INVALID_PIPE;
2819}
2820
a4a5d2f8
VS
2821static void vlv_steal_power_sequencer(struct drm_device *dev,
2822 enum pipe pipe)
2823{
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2825 struct intel_encoder *encoder;
2826
2827 lockdep_assert_held(&dev_priv->pps_mutex);
2828
ac3c12e4
VS
2829 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2830 return;
2831
a4a5d2f8
VS
2832 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2833 base.head) {
2834 struct intel_dp *intel_dp;
773538e8 2835 enum port port;
a4a5d2f8
VS
2836
2837 if (encoder->type != INTEL_OUTPUT_EDP)
2838 continue;
2839
2840 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2841 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2842
2843 if (intel_dp->pps_pipe != pipe)
2844 continue;
2845
2846 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2847 pipe_name(pipe), port_name(port));
a4a5d2f8 2848
e02f9a06 2849 WARN(encoder->base.crtc,
034e43c6
VS
2850 "stealing pipe %c power sequencer from active eDP port %c\n",
2851 pipe_name(pipe), port_name(port));
a4a5d2f8 2852
a4a5d2f8 2853 /* make sure vdd is off before we steal it */
83b84597 2854 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2855 }
2856}
2857
2858static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2859{
2860 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2861 struct intel_encoder *encoder = &intel_dig_port->base;
2862 struct drm_device *dev = encoder->base.dev;
2863 struct drm_i915_private *dev_priv = dev->dev_private;
2864 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2865
2866 lockdep_assert_held(&dev_priv->pps_mutex);
2867
093e3f13
VS
2868 if (!is_edp(intel_dp))
2869 return;
2870
a4a5d2f8
VS
2871 if (intel_dp->pps_pipe == crtc->pipe)
2872 return;
2873
2874 /*
2875 * If another power sequencer was being used on this
2876 * port previously make sure to turn off vdd there while
2877 * we still have control of it.
2878 */
2879 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2880 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2881
2882 /*
2883 * We may be stealing the power
2884 * sequencer from another port.
2885 */
2886 vlv_steal_power_sequencer(dev, crtc->pipe);
2887
2888 /* now it's all ours */
2889 intel_dp->pps_pipe = crtc->pipe;
2890
2891 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2892 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2893
2894 /* init power sequencer on this pipe and port */
36b5f425
VS
2895 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2896 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2897}
2898
ab1f90f9 2899static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2900{
2bd2ad64 2901 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2902 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2903 struct drm_device *dev = encoder->base.dev;
89b667f8 2904 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2905 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2906 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2907 int pipe = intel_crtc->pipe;
2908 u32 val;
a4fc5ed6 2909
a580516d 2910 mutex_lock(&dev_priv->sb_lock);
89b667f8 2911
ab3c759a 2912 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2913 val = 0;
2914 if (pipe)
2915 val |= (1<<21);
2916 else
2917 val &= ~(1<<21);
2918 val |= 0x001000c4;
ab3c759a
CML
2919 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2922
a580516d 2923 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2924
2925 intel_enable_dp(encoder);
89b667f8
JB
2926}
2927
ecff4f3b 2928static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2929{
2930 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2931 struct drm_device *dev = encoder->base.dev;
2932 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2933 struct intel_crtc *intel_crtc =
2934 to_intel_crtc(encoder->base.crtc);
e4607fcf 2935 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2936 int pipe = intel_crtc->pipe;
89b667f8 2937
8ac33ed3
DV
2938 intel_dp_prepare(encoder);
2939
89b667f8 2940 /* Program Tx lane resets to default */
a580516d 2941 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2942 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2943 DPIO_PCS_TX_LANE2_RESET |
2944 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2945 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2946 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2947 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2948 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2949 DPIO_PCS_CLK_SOFT_RESET);
2950
2951 /* Fix up inter-pair skew failure */
ab3c759a
CML
2952 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2953 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2954 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2955 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2956}
2957
e4a1d846
CML
2958static void chv_pre_enable_dp(struct intel_encoder *encoder)
2959{
2960 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2961 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2962 struct drm_device *dev = encoder->base.dev;
2963 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2964 struct intel_crtc *intel_crtc =
2965 to_intel_crtc(encoder->base.crtc);
2966 enum dpio_channel ch = vlv_dport_to_channel(dport);
2967 int pipe = intel_crtc->pipe;
2e523e98 2968 int data, i, stagger;
949c1d43 2969 u32 val;
e4a1d846 2970
a580516d 2971 mutex_lock(&dev_priv->sb_lock);
949c1d43 2972
570e2a74
VS
2973 /* allow hardware to manage TX FIFO reset source */
2974 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2975 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2976 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2977
e0fce78f
VS
2978 if (intel_crtc->config->lane_count > 2) {
2979 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2980 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2981 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2982 }
570e2a74 2983
949c1d43 2984 /* Program Tx lane latency optimal setting*/
e0fce78f 2985 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2986 /* Set the upar bit */
e0fce78f
VS
2987 if (intel_crtc->config->lane_count == 1)
2988 data = 0x0;
2989 else
2990 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2991 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2992 data << DPIO_UPAR_SHIFT);
2993 }
2994
2995 /* Data lane stagger programming */
2e523e98
VS
2996 if (intel_crtc->config->port_clock > 270000)
2997 stagger = 0x18;
2998 else if (intel_crtc->config->port_clock > 135000)
2999 stagger = 0xd;
3000 else if (intel_crtc->config->port_clock > 67500)
3001 stagger = 0x7;
3002 else if (intel_crtc->config->port_clock > 33750)
3003 stagger = 0x4;
3004 else
3005 stagger = 0x2;
3006
3007 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3008 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3009 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3010
e0fce78f
VS
3011 if (intel_crtc->config->lane_count > 2) {
3012 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3013 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3014 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3015 }
2e523e98
VS
3016
3017 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3018 DPIO_LANESTAGGER_STRAP(stagger) |
3019 DPIO_LANESTAGGER_STRAP_OVRD |
3020 DPIO_TX1_STAGGER_MASK(0x1f) |
3021 DPIO_TX1_STAGGER_MULT(6) |
3022 DPIO_TX2_STAGGER_MULT(0));
3023
e0fce78f
VS
3024 if (intel_crtc->config->lane_count > 2) {
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3026 DPIO_LANESTAGGER_STRAP(stagger) |
3027 DPIO_LANESTAGGER_STRAP_OVRD |
3028 DPIO_TX1_STAGGER_MASK(0x1f) |
3029 DPIO_TX1_STAGGER_MULT(7) |
3030 DPIO_TX2_STAGGER_MULT(5));
3031 }
e4a1d846 3032
a8f327fb
VS
3033 /* Deassert data lane reset */
3034 chv_data_lane_soft_reset(encoder, false);
3035
a580516d 3036 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 3037
e4a1d846 3038 intel_enable_dp(encoder);
b0b33846
VS
3039
3040 /* Second common lane will stay alive on its own now */
3041 if (dport->release_cl2_override) {
3042 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3043 dport->release_cl2_override = false;
3044 }
e4a1d846
CML
3045}
3046
9197c88b
VS
3047static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3048{
3049 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3050 struct drm_device *dev = encoder->base.dev;
3051 struct drm_i915_private *dev_priv = dev->dev_private;
3052 struct intel_crtc *intel_crtc =
3053 to_intel_crtc(encoder->base.crtc);
3054 enum dpio_channel ch = vlv_dport_to_channel(dport);
3055 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
3056 unsigned int lane_mask =
3057 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
3058 u32 val;
3059
625695f8
VS
3060 intel_dp_prepare(encoder);
3061
b0b33846
VS
3062 /*
3063 * Must trick the second common lane into life.
3064 * Otherwise we can't even access the PLL.
3065 */
3066 if (ch == DPIO_CH0 && pipe == PIPE_B)
3067 dport->release_cl2_override =
3068 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3069
e0fce78f
VS
3070 chv_phy_powergate_lanes(encoder, true, lane_mask);
3071
a580516d 3072 mutex_lock(&dev_priv->sb_lock);
9197c88b 3073
a8f327fb
VS
3074 /* Assert data lane reset */
3075 chv_data_lane_soft_reset(encoder, true);
3076
b9e5ac3c
VS
3077 /* program left/right clock distribution */
3078 if (pipe != PIPE_B) {
3079 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3080 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3081 if (ch == DPIO_CH0)
3082 val |= CHV_BUFLEFTENA1_FORCE;
3083 if (ch == DPIO_CH1)
3084 val |= CHV_BUFRIGHTENA1_FORCE;
3085 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3086 } else {
3087 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3088 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3089 if (ch == DPIO_CH0)
3090 val |= CHV_BUFLEFTENA2_FORCE;
3091 if (ch == DPIO_CH1)
3092 val |= CHV_BUFRIGHTENA2_FORCE;
3093 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3094 }
3095
9197c88b
VS
3096 /* program clock channel usage */
3097 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3098 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3099 if (pipe != PIPE_B)
3100 val &= ~CHV_PCS_USEDCLKCHANNEL;
3101 else
3102 val |= CHV_PCS_USEDCLKCHANNEL;
3103 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3104
e0fce78f
VS
3105 if (intel_crtc->config->lane_count > 2) {
3106 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3107 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3108 if (pipe != PIPE_B)
3109 val &= ~CHV_PCS_USEDCLKCHANNEL;
3110 else
3111 val |= CHV_PCS_USEDCLKCHANNEL;
3112 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3113 }
9197c88b
VS
3114
3115 /*
 3116 * This is a bit weird since generally CL
3117 * matches the pipe, but here we need to
3118 * pick the CL based on the port.
3119 */
3120 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3121 if (pipe != PIPE_B)
3122 val &= ~CHV_CMN_USEDCLKCHANNEL;
3123 else
3124 val |= CHV_CMN_USEDCLKCHANNEL;
3125 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3126
a580516d 3127 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3128}
3129
d6db995f
VS
3130static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3131{
3132 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3133 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3134 u32 val;
3135
3136 mutex_lock(&dev_priv->sb_lock);
3137
3138 /* disable left/right clock distribution */
3139 if (pipe != PIPE_B) {
3140 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3141 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3142 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3143 } else {
3144 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3145 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3146 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3147 }
3148
3149 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3150
b0b33846
VS
3151 /*
3152 * Leave the power down bit cleared for at least one
 3153 * lane so that chv_phy_powergate_ch() will power
3154 * on something when the channel is otherwise unused.
3155 * When the port is off and the override is removed
3156 * the lanes power down anyway, so otherwise it doesn't
3157 * really matter what the state of power down bits is
3158 * after this.
3159 */
e0fce78f 3160 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3161}
3162
a4fc5ed6 3163/*
df0c237d
JB
3164 * Native read with retry for link status and receiver capability reads for
3165 * cases where the sink may still be asleep.
9d1a1031
JN
3166 *
3167 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3168 * supposed to retry 3 times per the spec.
a4fc5ed6 3169 */
9d1a1031
JN
3170static ssize_t
3171intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3172 void *buffer, size_t size)
a4fc5ed6 3173{
9d1a1031
JN
3174 ssize_t ret;
3175 int i;
61da5fab 3176
f6a19066
VS
3177 /*
 3178 * Sometimes we just get the same incorrect byte repeated
 3179 * over the entire buffer. Doing just one throwaway read
3180 * initially seems to "solve" it.
3181 */
3182 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3183
61da5fab 3184 for (i = 0; i < 3; i++) {
9d1a1031
JN
3185 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3186 if (ret == size)
3187 return ret;
61da5fab
JB
3188 msleep(1);
3189 }
a4fc5ed6 3190
9d1a1031 3191 return ret;
a4fc5ed6
KP
3192}
3193
3194/*
3195 * Fetch AUX CH registers 0x202 - 0x207 which contain
3196 * link status information
3197 */
94223d04 3198bool
93f62dad 3199intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3200{
9d1a1031
JN
3201 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3202 DP_LANE0_1_STATUS,
3203 link_status,
3204 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3205}
3206
1100244e 3207/* These are source-specific values. */
94223d04 3208uint8_t
1a2eb460 3209intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3210{
30add22d 3211 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3212 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3213 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3214
9314726b
VK
3215 if (IS_BROXTON(dev))
3216 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3217 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3218 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3219 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3220 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
666a4537 3221 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
bd60018a 3222 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3223 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3224 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3225 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3226 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3227 else
bd60018a 3228 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3229}
3230
94223d04 3231uint8_t
1a2eb460
KP
3232intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3233{
30add22d 3234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3235 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3236
5a9d1f1a
DL
3237 if (INTEL_INFO(dev)->gen >= 9) {
3238 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3240 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3242 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3244 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3246 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3247 default:
3248 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3249 }
3250 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3251 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3253 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3255 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3257 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3259 default:
bd60018a 3260 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3261 }
666a4537 3262 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e2fa6fba 3263 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3271 default:
bd60018a 3272 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3273 }
bc7d38a4 3274 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3275 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3277 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3281 default:
bd60018a 3282 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3283 }
3284 } else {
3285 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3287 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3289 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3293 default:
bd60018a 3294 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3295 }
a4fc5ed6
KP
3296 }
3297}
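
/*
 * Illustrative sketch (not part of the original file): how the two limits
 * above are typically applied during link training, clamping the drive
 * settings requested by the sink to what the source supports and flagging
 * the "max reached" bits. example_clamp_train_set(), v and p are
 * hypothetical names; the real clamping lives in the link training code.
 */
#if 0
static uint8_t example_clamp_train_set(struct intel_dp *intel_dp,
				       uint8_t v, uint8_t p)
{
	uint8_t voltage_max = intel_dp_voltage_max(intel_dp);
	uint8_t preemph_max;

	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	return v | p;
}
#endif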
3298
5829975c 3299static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3300{
3301 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3302 struct drm_i915_private *dev_priv = dev->dev_private;
3303 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3304 struct intel_crtc *intel_crtc =
3305 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3306 unsigned long demph_reg_value, preemph_reg_value,
3307 uniqtranscale_reg_value;
3308 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3309 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3310 int pipe = intel_crtc->pipe;
e2fa6fba
P
3311
3312 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3313 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3314 preemph_reg_value = 0x0004000;
3315 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3317 demph_reg_value = 0x2B405555;
3318 uniqtranscale_reg_value = 0x552AB83A;
3319 break;
bd60018a 3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3321 demph_reg_value = 0x2B404040;
3322 uniqtranscale_reg_value = 0x5548B83A;
3323 break;
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3325 demph_reg_value = 0x2B245555;
3326 uniqtranscale_reg_value = 0x5560B83A;
3327 break;
bd60018a 3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3329 demph_reg_value = 0x2B405555;
3330 uniqtranscale_reg_value = 0x5598DA3A;
3331 break;
3332 default:
3333 return 0;
3334 }
3335 break;
bd60018a 3336 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3337 preemph_reg_value = 0x0002000;
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3340 demph_reg_value = 0x2B404040;
3341 uniqtranscale_reg_value = 0x5552B83A;
3342 break;
bd60018a 3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3344 demph_reg_value = 0x2B404848;
3345 uniqtranscale_reg_value = 0x5580B83A;
3346 break;
bd60018a 3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3348 demph_reg_value = 0x2B404040;
3349 uniqtranscale_reg_value = 0x55ADDA3A;
3350 break;
3351 default:
3352 return 0;
3353 }
3354 break;
bd60018a 3355 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3356 preemph_reg_value = 0x0000000;
3357 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3358 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3359 demph_reg_value = 0x2B305555;
3360 uniqtranscale_reg_value = 0x5570B83A;
3361 break;
bd60018a 3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3363 demph_reg_value = 0x2B2B4040;
3364 uniqtranscale_reg_value = 0x55ADDA3A;
3365 break;
3366 default:
3367 return 0;
3368 }
3369 break;
bd60018a 3370 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3371 preemph_reg_value = 0x0006000;
3372 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3374 demph_reg_value = 0x1B405555;
3375 uniqtranscale_reg_value = 0x55ADDA3A;
3376 break;
3377 default:
3378 return 0;
3379 }
3380 break;
3381 default:
3382 return 0;
3383 }
3384
a580516d 3385 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3386 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3387 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3388 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3389 uniqtranscale_reg_value);
ab3c759a
CML
3390 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3391 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3392 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3393 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3394 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3395
3396 return 0;
3397}
3398
67fa24b4
VS
3399static bool chv_need_uniq_trans_scale(uint8_t train_set)
3400{
3401 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3402 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3403}
3404
5829975c 3405static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3406{
3407 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3408 struct drm_i915_private *dev_priv = dev->dev_private;
3409 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3410 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3411 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3412 uint8_t train_set = intel_dp->train_set[0];
3413 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3414 enum pipe pipe = intel_crtc->pipe;
3415 int i;
e4a1d846
CML
3416
3417 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3418 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3419 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3420 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3421 deemph_reg_value = 128;
3422 margin_reg_value = 52;
3423 break;
bd60018a 3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3425 deemph_reg_value = 128;
3426 margin_reg_value = 77;
3427 break;
bd60018a 3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3429 deemph_reg_value = 128;
3430 margin_reg_value = 102;
3431 break;
bd60018a 3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3433 deemph_reg_value = 128;
3434 margin_reg_value = 154;
3435 /* FIXME extra to set for 1200 */
3436 break;
3437 default:
3438 return 0;
3439 }
3440 break;
bd60018a 3441 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3442 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3444 deemph_reg_value = 85;
3445 margin_reg_value = 78;
3446 break;
bd60018a 3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3448 deemph_reg_value = 85;
3449 margin_reg_value = 116;
3450 break;
bd60018a 3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3452 deemph_reg_value = 85;
3453 margin_reg_value = 154;
3454 break;
3455 default:
3456 return 0;
3457 }
3458 break;
bd60018a 3459 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3460 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3462 deemph_reg_value = 64;
3463 margin_reg_value = 104;
3464 break;
bd60018a 3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3466 deemph_reg_value = 64;
3467 margin_reg_value = 154;
3468 break;
3469 default:
3470 return 0;
3471 }
3472 break;
bd60018a 3473 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3474 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3476 deemph_reg_value = 43;
3477 margin_reg_value = 154;
3478 break;
3479 default:
3480 return 0;
3481 }
3482 break;
3483 default:
3484 return 0;
3485 }
3486
a580516d 3487 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3488
3489 /* Clear calc init */
1966e59e
VS
3490 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3491 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3492 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3493 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3495
e0fce78f
VS
3496 if (intel_crtc->config->lane_count > 2) {
3497 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3498 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3499 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3500 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3501 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3502 }
e4a1d846 3503
a02ef3c7
VS
3504 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3505 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3506 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3507 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3508
e0fce78f
VS
3509 if (intel_crtc->config->lane_count > 2) {
3510 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3511 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3512 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3513 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3514 }
a02ef3c7 3515
e4a1d846 3516 /* Program swing deemph */
e0fce78f 3517 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3518 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3519 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3520 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3521 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3522 }
e4a1d846
CML
3523
3524 /* Program swing margin */
e0fce78f 3525 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3526 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3527
1fb44505
VS
3528 val &= ~DPIO_SWING_MARGIN000_MASK;
3529 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3530
3531 /*
3532 * Supposedly this value shouldn't matter when unique transition
3533 * scale is disabled, but in fact it does matter. Let's just
3534 * always program the same value and hope it's OK.
3535 */
3536 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3537 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3538
f72df8db
VS
3539 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3540 }
e4a1d846 3541
67fa24b4
VS
3542 /*
3543 * The document said it needs to set bit 27 for ch0 and bit 26
3544 * for ch1. Might be a typo in the doc.
3545 * For now, for this unique transition scale selection, set bit
3546 * 27 for ch0 and ch1.
3547 */
e0fce78f 3548 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3549 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3550 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3551 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3552 else
3553 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3554 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3555 }
3556
3557 /* Start swing calculation */
1966e59e
VS
3558 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3559 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3560 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3561
e0fce78f
VS
3562 if (intel_crtc->config->lane_count > 2) {
3563 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3564 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3565 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3566 }
e4a1d846 3567
a580516d 3568 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3569
3570 return 0;
3571}
3572
a4fc5ed6 3573static uint32_t
5829975c 3574gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3575{
3cf2efb1 3576 uint32_t signal_levels = 0;
a4fc5ed6 3577
3cf2efb1 3578 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3579 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3580 default:
3581 signal_levels |= DP_VOLTAGE_0_4;
3582 break;
bd60018a 3583 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3584 signal_levels |= DP_VOLTAGE_0_6;
3585 break;
bd60018a 3586 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3587 signal_levels |= DP_VOLTAGE_0_8;
3588 break;
bd60018a 3589 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3590 signal_levels |= DP_VOLTAGE_1_2;
3591 break;
3592 }
3cf2efb1 3593 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3594 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3595 default:
3596 signal_levels |= DP_PRE_EMPHASIS_0;
3597 break;
bd60018a 3598 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3599 signal_levels |= DP_PRE_EMPHASIS_3_5;
3600 break;
bd60018a 3601 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3602 signal_levels |= DP_PRE_EMPHASIS_6;
3603 break;
bd60018a 3604 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3605 signal_levels |= DP_PRE_EMPHASIS_9_5;
3606 break;
3607 }
3608 return signal_levels;
3609}
3610
e3421a18
ZW
3611/* Gen6's DP voltage swing and pre-emphasis control */
3612static uint32_t
5829975c 3613gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3614{
3c5a62b5
YL
3615 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3616 DP_TRAIN_PRE_EMPHASIS_MASK);
3617 switch (signal_levels) {
bd60018a
SJ
3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3619 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3620 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3621 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3622 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3623 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3624 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3625 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3626 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3627 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3628 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3629 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3630 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3631 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3632 default:
3c5a62b5
YL
3633 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3634 "0x%x\n", signal_levels);
3635 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3636 }
3637}
3638
1a2eb460
KP
3639/* Gen7's DP voltage swing and pre-emphasis control */
3640static uint32_t
5829975c 3641gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3642{
3643 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3644 DP_TRAIN_PRE_EMPHASIS_MASK);
3645 switch (signal_levels) {
bd60018a 3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3647 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3649 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3651 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3652
bd60018a 3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3654 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3656 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3657
bd60018a 3658 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3659 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3661 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3662
3663 default:
3664 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3665 "0x%x\n", signal_levels);
3666 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3667 }
3668}
3669
94223d04 3670void
f4eb692e 3671intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3672{
3673 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3674 enum port port = intel_dig_port->port;
f0a3424e 3675 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3676 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3677 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3678 uint8_t train_set = intel_dp->train_set[0];
3679
f8896f5d
DW
3680 if (HAS_DDI(dev)) {
3681 signal_levels = ddi_signal_levels(intel_dp);
3682
3683 if (IS_BROXTON(dev))
3684 signal_levels = 0;
3685 else
3686 mask = DDI_BUF_EMP_MASK;
e4a1d846 3687 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3688 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3689 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3690 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3691 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3692 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3693 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3694 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3695 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3696 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3697 } else {
5829975c 3698 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3699 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3700 }
3701
96fb9f9b
VK
3702 if (mask)
3703 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3704
3705 DRM_DEBUG_KMS("Using vswing level %d\n",
3706 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3707 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3708 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3709 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3710
f4eb692e 3711 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3712
3713 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3714 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3715}
3716
94223d04 3717void
e9c176d5
ACO
3718intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3719 uint8_t dp_train_pat)
a4fc5ed6 3720{
174edf1f 3721 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3722 struct drm_i915_private *dev_priv =
3723 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3724
f4eb692e 3725 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3726
f4eb692e 3727 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3728 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3729}
3730
94223d04 3731void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3732{
3733 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3734 struct drm_device *dev = intel_dig_port->base.base.dev;
3735 struct drm_i915_private *dev_priv = dev->dev_private;
3736 enum port port = intel_dig_port->port;
3737 uint32_t val;
3738
3739 if (!HAS_DDI(dev))
3740 return;
3741
3742 val = I915_READ(DP_TP_CTL(port));
3743 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3744 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3745 I915_WRITE(DP_TP_CTL(port), val);
3746
3747 /*
3748 * On PORT_A we can have only eDP in SST mode. There, the only reason
3749 * we need to set idle transmission mode is to work around a HW issue
3750 * where we enable the pipe while not in idle link-training mode.
3751 * In this case there is a requirement to wait for a minimum number of
3752 * idle patterns to be sent.
3753 */
3754 if (port == PORT_A)
3755 return;
3756
3757 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3758 1))
3759 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3760}
3761
a4fc5ed6 3762static void
ea5b213a 3763intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3764{
da63a9f2 3765 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3766 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3767 enum port port = intel_dig_port->port;
da63a9f2 3768 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3769 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3770 uint32_t DP = intel_dp->DP;
a4fc5ed6 3771
bc76e320 3772 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3773 return;
3774
0c33d8d7 3775 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3776 return;
3777
28c97730 3778 DRM_DEBUG_KMS("\n");
32f9d658 3779
39e5fa88
VS
3780 if ((IS_GEN7(dev) && port == PORT_A) ||
3781 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3782 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3783 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3784 } else {
aad3d14d
VS
3785 if (IS_CHERRYVIEW(dev))
3786 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3787 else
3788 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3789 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3790 }
1612c8bd 3791 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3792 POSTING_READ(intel_dp->output_reg);
5eb08b69 3793
1612c8bd
VS
3794 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3795 I915_WRITE(intel_dp->output_reg, DP);
3796 POSTING_READ(intel_dp->output_reg);
3797
3798 /*
3799 * HW workaround for IBX, we need to move the port
3800 * to transcoder A after disabling it to allow the
3801 * matching HDMI port to be enabled on transcoder A.
3802 */
3803 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3804 /*
3805 * We get CPU/PCH FIFO underruns on the other pipe when
3806 * doing the workaround. Sweep them under the rug.
3807 */
3808 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3809 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3810
1612c8bd
VS
3811 /* always enable with pattern 1 (as per spec) */
3812 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3813 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3814 I915_WRITE(intel_dp->output_reg, DP);
3815 POSTING_READ(intel_dp->output_reg);
3816
3817 DP &= ~DP_PORT_EN;
5bddd17f 3818 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3819 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3820
3821 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3822 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3823 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3824 }
3825
f01eca2e 3826 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3827
3828 intel_dp->DP = DP;
a4fc5ed6
KP
3829}
3830
26d61aad
KP
3831static bool
3832intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3833{
a031d709
RV
3834 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3835 struct drm_device *dev = dig_port->base.base.dev;
3836 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3837 uint8_t rev;
a031d709 3838
9d1a1031
JN
3839 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3840 sizeof(intel_dp->dpcd)) < 0)
edb39244 3841 return false; /* aux transfer failed */
92fd8fd1 3842
a8e98153 3843 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3844
edb39244
AJ
3845 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3846 return false; /* DPCD not present */
3847
2293bb5c
SK
3848 /* Check if the panel supports PSR */
3849 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3850 if (is_edp(intel_dp)) {
9d1a1031
JN
3851 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3852 intel_dp->psr_dpcd,
3853 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3854 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3855 dev_priv->psr.sink_support = true;
50003939 3856 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3857 }
474d1ec4
SJ
3858
3859 if (INTEL_INFO(dev)->gen >= 9 &&
3860 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3861 uint8_t frame_sync_cap;
3862
3863 dev_priv->psr.sink_support = true;
3864 intel_dp_dpcd_read_wake(&intel_dp->aux,
3865 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3866 &frame_sync_cap, 1);
3867 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3868 /* PSR2 needs frame sync as well */
3869 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3870 DRM_DEBUG_KMS("PSR2 %s on sink",
3871 dev_priv->psr.psr2_support ? "supported" : "not supported");
3872 }
50003939
JN
3873 }
3874
bc5133d5 3875 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3876 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3877 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3878
fc0f8e25
SJ
3879 /* Intermediate frequency support */
3880 if (is_edp(intel_dp) &&
3881 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3882 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3883 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3884 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3885 int i;
3886
fc0f8e25
SJ
3887 intel_dp_dpcd_read_wake(&intel_dp->aux,
3888 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3889 sink_rates,
3890 sizeof(sink_rates));
ea2d8a42 3891
94ca719e
VS
3892 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3893 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3894
3895 if (val == 0)
3896 break;
3897
af77b974
SJ
3898 /* Value read is in units of 200 kHz, while drm clock is stored in deca-kHz */
3899 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3900 }
94ca719e 3901 intel_dp->num_sink_rates = i;
fc0f8e25 3902 }
0336400e
VS
3903
3904 intel_dp_print_rates(intel_dp);
3905
edb39244
AJ
3906 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3907 DP_DWN_STRM_PORT_PRESENT))
3908 return true; /* native DP sink */
3909
3910 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3911 return true; /* no per-port downstream info */
3912
9d1a1031
JN
3913 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3914 intel_dp->downstream_ports,
3915 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3916 return false; /* downstream port status fetch failed */
3917
3918 return true;
92fd8fd1
KP
3919}
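
/*
 * Illustrative sketch (not part of the original file): the unit conversion
 * used for the DP_SUPPORTED_LINK_RATES table above. Each raw entry is in
 * units of 200 kHz while the driver stores link rates in deca-kHz, so a
 * raw value of 8100 becomes 8100 * 200 kHz = 1.62 GHz, stored as 162000.
 * example_sink_rate_to_deca_khz() is a hypothetical helper name.
 */
#if 0
static int example_sink_rate_to_deca_khz(u16 raw)
{
	return (raw * 200) / 10; /* e.g. 8100 -> 162000 */
}
#endif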
3920
0d198328
AJ
3921static void
3922intel_dp_probe_oui(struct intel_dp *intel_dp)
3923{
3924 u8 buf[3];
3925
3926 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3927 return;
3928
9d1a1031 3929 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3930 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3931 buf[0], buf[1], buf[2]);
3932
9d1a1031 3933 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3934 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3935 buf[0], buf[1], buf[2]);
3936}
3937
0e32b39c
DA
3938static bool
3939intel_dp_probe_mst(struct intel_dp *intel_dp)
3940{
3941 u8 buf[1];
3942
3943 if (!intel_dp->can_mst)
3944 return false;
3945
3946 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3947 return false;
3948
0e32b39c
DA
3949 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3950 if (buf[0] & DP_MST_CAP) {
3951 DRM_DEBUG_KMS("Sink is MST capable\n");
3952 intel_dp->is_mst = true;
3953 } else {
3954 DRM_DEBUG_KMS("Sink is not MST capable\n");
3955 intel_dp->is_mst = false;
3956 }
3957 }
0e32b39c
DA
3958
3959 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3960 return intel_dp->is_mst;
3961}
3962
e5a1cab5 3963static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3964{
082dcc7c 3965 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 3966 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 3967 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3968 u8 buf;
e5a1cab5 3969 int ret = 0;
c6297843
RV
3970 int count = 0;
3971 int attempts = 10;
d2e216d0 3972
082dcc7c
RV
3973 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3974 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3975 ret = -EIO;
3976 goto out;
4373f0f2
PZ
3977 }
3978
082dcc7c 3979 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 3980 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 3981 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3982 ret = -EIO;
3983 goto out;
3984 }
d2e216d0 3985
c6297843
RV
3986 do {
3987 intel_wait_for_vblank(dev, intel_crtc->pipe);
3988
3989 if (drm_dp_dpcd_readb(&intel_dp->aux,
3990 DP_TEST_SINK_MISC, &buf) < 0) {
3991 ret = -EIO;
3992 goto out;
3993 }
3994 count = buf & DP_TEST_COUNT_MASK;
3995 } while (--attempts && count);
3996
3997 if (attempts == 0) {
3998 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
3999 ret = -ETIMEDOUT;
4000 }
4001
e5a1cab5 4002 out:
082dcc7c 4003 hsw_enable_ips(intel_crtc);
e5a1cab5 4004 return ret;
082dcc7c
RV
4005}
4006
4007static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4008{
4009 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4010 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c
RV
4011 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4012 u8 buf;
e5a1cab5
RV
4013 int ret;
4014
082dcc7c
RV
4015 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4016 return -EIO;
4017
4018 if (!(buf & DP_TEST_CRC_SUPPORTED))
4019 return -ENOTTY;
4020
4021 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4022 return -EIO;
4023
6d8175da
RV
4024 if (buf & DP_TEST_SINK_START) {
4025 ret = intel_dp_sink_crc_stop(intel_dp);
4026 if (ret)
4027 return ret;
4028 }
4029
082dcc7c 4030 hsw_disable_ips(intel_crtc);
1dda5f93 4031
9d1a1031 4032 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4033 buf | DP_TEST_SINK_START) < 0) {
4034 hsw_enable_ips(intel_crtc);
4035 return -EIO;
4373f0f2
PZ
4036 }
4037
d72f9d91 4038 intel_wait_for_vblank(dev, intel_crtc->pipe);
082dcc7c
RV
4039 return 0;
4040}
4041
4042int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4043{
4044 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4045 struct drm_device *dev = dig_port->base.base.dev;
4046 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4047 u8 buf;
621d4c76 4048 int count, ret;
082dcc7c 4049 int attempts = 6;
082dcc7c
RV
4050
4051 ret = intel_dp_sink_crc_start(intel_dp);
4052 if (ret)
4053 return ret;
4054
ad9dc91b 4055 do {
621d4c76
RV
4056 intel_wait_for_vblank(dev, intel_crtc->pipe);
4057
1dda5f93 4058 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4059 DP_TEST_SINK_MISC, &buf) < 0) {
4060 ret = -EIO;
afe0d67e 4061 goto stop;
4373f0f2 4062 }
621d4c76 4063 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4064
7e38eeff 4065 } while (--attempts && count == 0);
ad9dc91b
RV
4066
4067 if (attempts == 0) {
7e38eeff
RV
4068 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4069 ret = -ETIMEDOUT;
4070 goto stop;
4071 }
4072
4073 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4074 ret = -EIO;
4075 goto stop;
ad9dc91b 4076 }
d2e216d0 4077
afe0d67e 4078stop:
082dcc7c 4079 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4080 return ret;
d2e216d0
RV
4081}
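
/*
 * Illustrative sketch (not part of the original file): a caller of
 * intel_dp_sink_crc() gets six CRC bytes back on success (two each for
 * R/Cr, G/Y and B/Cb). example_log_sink_crc() is a hypothetical name;
 * in practice the result is consumed by test/debug code.
 */
#if 0
static void example_log_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x %02x%02x %02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}
#endif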
4082
a60f0e38
JB
4083static bool
4084intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4085{
9d1a1031
JN
4086 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4087 DP_DEVICE_SERVICE_IRQ_VECTOR,
4088 sink_irq_vector, 1) == 1;
a60f0e38
JB
4089}
4090
0e32b39c
DA
4091static bool
4092intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4093{
4094 int ret;
4095
4096 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4097 DP_SINK_COUNT_ESI,
4098 sink_irq_vector, 14);
4099 if (ret != 14)
4100 return false;
4101
4102 return true;
4103}
4104
c5d5ab7a
TP
4105static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4106{
4107 uint8_t test_result = DP_TEST_ACK;
4108 return test_result;
4109}
4110
4111static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4112{
4113 uint8_t test_result = DP_TEST_NAK;
4114 return test_result;
4115}
4116
4117static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4118{
c5d5ab7a 4119 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4120 struct intel_connector *intel_connector = intel_dp->attached_connector;
4121 struct drm_connector *connector = &intel_connector->base;
4122
4123 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4124 connector->edid_corrupt ||
559be30c
TP
4125 intel_dp->aux.i2c_defer_count > 6) {
4126 /* Check EDID read for NACKs, DEFERs and corruption
4127 * (DP CTS 1.2 Core r1.1)
4128 * 4.2.2.4 : Failed EDID read, I2C_NAK
4129 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4130 * 4.2.2.6 : EDID corruption detected
4131 * Use failsafe mode for all cases
4132 */
4133 if (intel_dp->aux.i2c_nack_count > 0 ||
4134 intel_dp->aux.i2c_defer_count > 0)
4135 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4136 intel_dp->aux.i2c_nack_count,
4137 intel_dp->aux.i2c_defer_count);
4138 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4139 } else {
f79b468e
TS
4140 struct edid *block = intel_connector->detect_edid;
4141
4142 /* We have to write the checksum
4143 * of the last block read
4144 */
4145 block += intel_connector->detect_edid->extensions;
4146
559be30c
TP
4147 if (!drm_dp_dpcd_write(&intel_dp->aux,
4148 DP_TEST_EDID_CHECKSUM,
f79b468e 4149 &block->checksum,
5a1cc655 4150 1))
559be30c
TP
4151 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4152
4153 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4154 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4155 }
4156
4157 /* Set test active flag here so userspace doesn't interrupt things */
4158 intel_dp->compliance_test_active = 1;
4159
c5d5ab7a
TP
4160 return test_result;
4161}
4162
4163static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4164{
c5d5ab7a
TP
4165 uint8_t test_result = DP_TEST_NAK;
4166 return test_result;
4167}
4168
4169static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4170{
4171 uint8_t response = DP_TEST_NAK;
4172 uint8_t rxdata = 0;
4173 int status = 0;
4174
c5d5ab7a
TP
4175 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4176 if (status <= 0) {
4177 DRM_DEBUG_KMS("Could not read test request from sink\n");
4178 goto update_status;
4179 }
4180
4181 switch (rxdata) {
4182 case DP_TEST_LINK_TRAINING:
4183 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4184 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4185 response = intel_dp_autotest_link_training(intel_dp);
4186 break;
4187 case DP_TEST_LINK_VIDEO_PATTERN:
4188 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4189 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4190 response = intel_dp_autotest_video_pattern(intel_dp);
4191 break;
4192 case DP_TEST_LINK_EDID_READ:
4193 DRM_DEBUG_KMS("EDID test requested\n");
4194 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4195 response = intel_dp_autotest_edid(intel_dp);
4196 break;
4197 case DP_TEST_LINK_PHY_TEST_PATTERN:
4198 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4199 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4200 response = intel_dp_autotest_phy_pattern(intel_dp);
4201 break;
4202 default:
4203 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4204 break;
4205 }
4206
4207update_status:
4208 status = drm_dp_dpcd_write(&intel_dp->aux,
4209 DP_TEST_RESPONSE,
4210 &response, 1);
4211 if (status <= 0)
4212 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4213}
4214
0e32b39c
DA
4215static int
4216intel_dp_check_mst_status(struct intel_dp *intel_dp)
4217{
4218 bool bret;
4219
4220 if (intel_dp->is_mst) {
4221 u8 esi[16] = { 0 };
4222 int ret = 0;
4223 int retry;
4224 bool handled;
4225 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4226go_again:
4227 if (bret == true) {
4228
4229 /* check link status - esi[10] = 0x200c */
90a6b7b0 4230 if (intel_dp->active_mst_links &&
901c2daf 4231 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4232 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4233 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4234 intel_dp_stop_link_train(intel_dp);
4235 }
4236
6f34cc39 4237 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4238 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4239
4240 if (handled) {
4241 for (retry = 0; retry < 3; retry++) {
4242 int wret;
4243 wret = drm_dp_dpcd_write(&intel_dp->aux,
4244 DP_SINK_COUNT_ESI+1,
4245 &esi[1], 3);
4246 if (wret == 3) {
4247 break;
4248 }
4249 }
4250
4251 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4252 if (bret == true) {
6f34cc39 4253 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4254 goto go_again;
4255 }
4256 } else
4257 ret = 0;
4258
4259 return ret;
4260 } else {
4261 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4262 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4263 intel_dp->is_mst = false;
4264 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4265 /* send a hotplug event */
4266 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4267 }
4268 }
4269 return -EINVAL;
4270}
4271
a4fc5ed6
KP
4272/*
4273 * According to DP spec
4274 * 5.1.2:
4275 * 1. Read DPCD
4276 * 2. Configure link according to Receiver Capabilities
4277 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4278 * 4. Check link status on receipt of hot-plug interrupt
4279 */
a5146200 4280static void
ea5b213a 4281intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4282{
5b215bcf 4283 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4284 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4285 u8 sink_irq_vector;
93f62dad 4286 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4287
5b215bcf
DA
4288 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4289
4df6960e
SS
4290 /*
4291 * Clearing compliance test variables to allow capturing
4292 * of values for next automated test request.
4293 */
4294 intel_dp->compliance_test_active = 0;
4295 intel_dp->compliance_test_type = 0;
4296 intel_dp->compliance_test_data = 0;
4297
e02f9a06 4298 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4299 return;
4300
1a125d8a
ID
4301 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4302 return;
4303
92fd8fd1 4304 /* Try to read receiver status if the link appears to be up */
93f62dad 4305 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4306 return;
4307 }
4308
92fd8fd1 4309 /* Now read the DPCD to see if it's actually running */
26d61aad 4310 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4311 return;
4312 }
4313
a60f0e38
JB
4314 /* Try to read the source of the interrupt */
4315 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4316 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4317 /* Clear interrupt source */
9d1a1031
JN
4318 drm_dp_dpcd_writeb(&intel_dp->aux,
4319 DP_DEVICE_SERVICE_IRQ_VECTOR,
4320 sink_irq_vector);
a60f0e38
JB
4321
4322 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4323 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4324 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4325 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4326 }
4327
14631e9d
SS
4328 /* if link training is requested we should always perform it */
4329 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4330 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
92fd8fd1 4331 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4332 intel_encoder->base.name);
33a34e4e 4333 intel_dp_start_link_train(intel_dp);
3ab9c637 4334 intel_dp_stop_link_train(intel_dp);
33a34e4e 4335 }
a4fc5ed6 4336}
a4fc5ed6 4337
caf9ab24 4338/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4339static enum drm_connector_status
26d61aad 4340intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4341{
caf9ab24 4342 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4343 uint8_t type;
4344
4345 if (!intel_dp_get_dpcd(intel_dp))
4346 return connector_status_disconnected;
4347
4348 /* if there's no downstream port, we're done */
4349 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4350 return connector_status_connected;
caf9ab24
AJ
4351
4352 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4353 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4354 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4355 uint8_t reg;
9d1a1031
JN
4356
4357 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4358 &reg, 1) < 0)
caf9ab24 4359 return connector_status_unknown;
9d1a1031 4360
23235177
AJ
4361 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4362 : connector_status_disconnected;
caf9ab24
AJ
4363 }
4364
4365 /* If no HPD, poke DDC gently */
0b99836f 4366 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4367 return connector_status_connected;
caf9ab24
AJ
4368
4369 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4370 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4371 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4372 if (type == DP_DS_PORT_TYPE_VGA ||
4373 type == DP_DS_PORT_TYPE_NON_EDID)
4374 return connector_status_unknown;
4375 } else {
4376 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4377 DP_DWN_STRM_PORT_TYPE_MASK;
4378 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4379 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4380 return connector_status_unknown;
4381 }
caf9ab24
AJ
4382
4383 /* Anything else is out of spec, warn and ignore */
4384 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4385 return connector_status_disconnected;
71ba9000
AJ
4386}
4387
d410b56d
CW
4388static enum drm_connector_status
4389edp_detect(struct intel_dp *intel_dp)
4390{
4391 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4392 enum drm_connector_status status;
4393
4394 status = intel_panel_detect(dev);
4395 if (status == connector_status_unknown)
4396 status = connector_status_connected;
4397
4398 return status;
4399}
4400
b93433cc
JN
4401static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4402 struct intel_digital_port *port)
5eb08b69 4403{
b93433cc 4404 u32 bit;
01cb9ea6 4405
0df53b77
JN
4406 switch (port->port) {
4407 case PORT_A:
4408 return true;
4409 case PORT_B:
4410 bit = SDE_PORTB_HOTPLUG;
4411 break;
4412 case PORT_C:
4413 bit = SDE_PORTC_HOTPLUG;
4414 break;
4415 case PORT_D:
4416 bit = SDE_PORTD_HOTPLUG;
4417 break;
4418 default:
4419 MISSING_CASE(port->port);
4420 return false;
4421 }
4422
4423 return I915_READ(SDEISR) & bit;
4424}
4425
4426static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4427 struct intel_digital_port *port)
4428{
4429 u32 bit;
4430
4431 switch (port->port) {
4432 case PORT_A:
4433 return true;
4434 case PORT_B:
4435 bit = SDE_PORTB_HOTPLUG_CPT;
4436 break;
4437 case PORT_C:
4438 bit = SDE_PORTC_HOTPLUG_CPT;
4439 break;
4440 case PORT_D:
4441 bit = SDE_PORTD_HOTPLUG_CPT;
4442 break;
a78695d3
JN
4443 case PORT_E:
4444 bit = SDE_PORTE_HOTPLUG_SPT;
4445 break;
0df53b77
JN
4446 default:
4447 MISSING_CASE(port->port);
4448 return false;
b93433cc 4449 }
1b469639 4450
b93433cc 4451 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4452}
4453
7e66bcf2 4454static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4455 struct intel_digital_port *port)
a4fc5ed6 4456{
9642c81c 4457 u32 bit;
5eb08b69 4458
9642c81c
JN
4459 switch (port->port) {
4460 case PORT_B:
4461 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4462 break;
4463 case PORT_C:
4464 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4465 break;
4466 case PORT_D:
4467 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4468 break;
4469 default:
4470 MISSING_CASE(port->port);
4471 return false;
4472 }
4473
4474 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4475}
4476
4477static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4478 struct intel_digital_port *port)
4479{
4480 u32 bit;
4481
4482 switch (port->port) {
4483 case PORT_B:
4484 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4485 break;
4486 case PORT_C:
4487 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4488 break;
4489 case PORT_D:
4490 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4491 break;
4492 default:
4493 MISSING_CASE(port->port);
4494 return false;
a4fc5ed6
KP
4495 }
4496
1d245987 4497 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4498}
4499
e464bfde 4500static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4501 struct intel_digital_port *intel_dig_port)
e464bfde 4502{
e2ec35a5
SJ
4503 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4504 enum port port;
e464bfde
JN
4505 u32 bit;
4506
e2ec35a5
SJ
4507 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4508 switch (port) {
e464bfde
JN
4509 case PORT_A:
4510 bit = BXT_DE_PORT_HP_DDIA;
4511 break;
4512 case PORT_B:
4513 bit = BXT_DE_PORT_HP_DDIB;
4514 break;
4515 case PORT_C:
4516 bit = BXT_DE_PORT_HP_DDIC;
4517 break;
4518 default:
e2ec35a5 4519 MISSING_CASE(port);
e464bfde
JN
4520 return false;
4521 }
4522
4523 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4524}
4525
7e66bcf2
JN
4526/*
4527 * intel_digital_port_connected - is the specified port connected?
4528 * @dev_priv: i915 private structure
4529 * @port: the port to test
4530 *
4531 * Return %true if @port is connected, %false otherwise.
4532 */
237ed86c 4533bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4534 struct intel_digital_port *port)
4535{
0df53b77 4536 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4537 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4538 if (HAS_PCH_SPLIT(dev_priv))
4539 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4540 else if (IS_BROXTON(dev_priv))
4541 return bxt_digital_port_connected(dev_priv, port);
666a4537 4542 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9642c81c 4543 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4544 else
4545 return g4x_digital_port_connected(dev_priv, port);
4546}
4547
8c241fef 4548static struct edid *
beb60608 4549intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4550{
beb60608 4551 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4552
9cd300e0
JN
4553 /* use cached edid if we have one */
4554 if (intel_connector->edid) {
9cd300e0
JN
4555 /* invalid edid */
4556 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4557 return NULL;
4558
55e9edeb 4559 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4560 } else
4561 return drm_get_edid(&intel_connector->base,
4562 &intel_dp->aux.ddc);
4563}
8c241fef 4564
beb60608
CW
4565static void
4566intel_dp_set_edid(struct intel_dp *intel_dp)
4567{
4568 struct intel_connector *intel_connector = intel_dp->attached_connector;
4569 struct edid *edid;
8c241fef 4570
beb60608
CW
4571 edid = intel_dp_get_edid(intel_dp);
4572 intel_connector->detect_edid = edid;
4573
4574 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4575 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4576 else
4577 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4578}
4579
beb60608
CW
4580static void
4581intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4582{
beb60608 4583 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4584
beb60608
CW
4585 kfree(intel_connector->detect_edid);
4586 intel_connector->detect_edid = NULL;
9cd300e0 4587
beb60608
CW
4588 intel_dp->has_audio = false;
4589}
d6f24d0f 4590
a9756bb5
ZW
4591static enum drm_connector_status
4592intel_dp_detect(struct drm_connector *connector, bool force)
4593{
4594 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4595 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4596 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4597 struct drm_device *dev = connector->dev;
a9756bb5 4598 enum drm_connector_status status;
671dedd2 4599 enum intel_display_power_domain power_domain;
0e32b39c 4600 bool ret;
09b1eb13 4601 u8 sink_irq_vector;
a9756bb5 4602
164c8598 4603 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4604 connector->base.id, connector->name);
beb60608 4605 intel_dp_unset_edid(intel_dp);
164c8598 4606
0e32b39c
DA
4607 if (intel_dp->is_mst) {
4608 /* MST devices are disconnected from a monitor POV */
4609 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4610 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4611 return connector_status_disconnected;
0e32b39c
DA
4612 }
4613
25f78f58
VS
4614 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4615 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4616
d410b56d
CW
4617 /* Can't disconnect eDP, but you can close the lid... */
4618 if (is_edp(intel_dp))
4619 status = edp_detect(intel_dp);
c555a81d
ACO
4620 else if (intel_digital_port_connected(to_i915(dev),
4621 dp_to_dig_port(intel_dp)))
4622 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 4623 else
c555a81d
ACO
4624 status = connector_status_disconnected;
4625
4df6960e
SS
4626 if (status != connector_status_connected) {
4627 intel_dp->compliance_test_active = 0;
4628 intel_dp->compliance_test_type = 0;
4629 intel_dp->compliance_test_data = 0;
4630
c8c8fb33 4631 goto out;
4df6960e 4632 }
a9756bb5 4633
0d198328
AJ
4634 intel_dp_probe_oui(intel_dp);
4635
0e32b39c
DA
4636 ret = intel_dp_probe_mst(intel_dp);
4637 if (ret) {
4638 /* if we are in MST mode then this connector
4639 won't appear connected or have anything with EDID on it */
4640 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4641 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4642 status = connector_status_disconnected;
4643 goto out;
4644 }
4645
4df6960e
SS
4646 /*
4647 * Clearing NACK and defer counts to get their exact values
4648 * while reading EDID which are required by Compliance tests
4649 * 4.2.2.4 and 4.2.2.5
4650 */
4651 intel_dp->aux.i2c_nack_count = 0;
4652 intel_dp->aux.i2c_defer_count = 0;
4653
beb60608 4654 intel_dp_set_edid(intel_dp);
a9756bb5 4655
d63885da
PZ
4656 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4657 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4658 status = connector_status_connected;
4659
09b1eb13
TP
4660 /* Try to read the source of the interrupt */
4661 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4662 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4663 /* Clear interrupt source */
4664 drm_dp_dpcd_writeb(&intel_dp->aux,
4665 DP_DEVICE_SERVICE_IRQ_VECTOR,
4666 sink_irq_vector);
4667
4668 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4669 intel_dp_handle_test_request(intel_dp);
4670 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4671 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4672 }
4673
c8c8fb33 4674out:
25f78f58 4675 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4676 return status;
a4fc5ed6
KP
4677}
4678
beb60608
CW
4679static void
4680intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4681{
df0e9248 4682 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4683 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4684 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4685 enum intel_display_power_domain power_domain;
a4fc5ed6 4686
beb60608
CW
4687 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4688 connector->base.id, connector->name);
4689 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4690
beb60608
CW
4691 if (connector->status != connector_status_connected)
4692 return;
671dedd2 4693
25f78f58
VS
4694 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4695 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4696
4697 intel_dp_set_edid(intel_dp);
4698
25f78f58 4699 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4700
4701 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4702 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4703}
4704
4705static int intel_dp_get_modes(struct drm_connector *connector)
4706{
4707 struct intel_connector *intel_connector = to_intel_connector(connector);
4708 struct edid *edid;
4709
4710 edid = intel_connector->detect_edid;
4711 if (edid) {
4712 int ret = intel_connector_update_modes(connector, edid);
4713 if (ret)
4714 return ret;
4715 }
32f9d658 4716
f8779fda 4717 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4718 if (is_edp(intel_attached_dp(connector)) &&
4719 intel_connector->panel.fixed_mode) {
f8779fda 4720 struct drm_display_mode *mode;
beb60608
CW
4721
4722 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4723 intel_connector->panel.fixed_mode);
f8779fda 4724 if (mode) {
32f9d658
ZW
4725 drm_mode_probed_add(connector, mode);
4726 return 1;
4727 }
4728 }
beb60608 4729
32f9d658 4730 return 0;
a4fc5ed6
KP
4731}
4732
1aad7ac0
CW
4733static bool
4734intel_dp_detect_audio(struct drm_connector *connector)
4735{
1aad7ac0 4736 bool has_audio = false;
beb60608 4737 struct edid *edid;
1aad7ac0 4738
beb60608
CW
4739 edid = to_intel_connector(connector)->detect_edid;
4740 if (edid)
1aad7ac0 4741 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4742
1aad7ac0
CW
4743 return has_audio;
4744}
4745
f684960e
CW
4746static int
4747intel_dp_set_property(struct drm_connector *connector,
4748 struct drm_property *property,
4749 uint64_t val)
4750{
e953fd7b 4751 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4752 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4753 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4754 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4755 int ret;
4756
662595df 4757 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4758 if (ret)
4759 return ret;
4760
3f43c48d 4761 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4762 int i = val;
4763 bool has_audio;
4764
4765 if (i == intel_dp->force_audio)
f684960e
CW
4766 return 0;
4767
1aad7ac0 4768 intel_dp->force_audio = i;
f684960e 4769
c3e5f67b 4770 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4771 has_audio = intel_dp_detect_audio(connector);
4772 else
c3e5f67b 4773 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4774
4775 if (has_audio == intel_dp->has_audio)
f684960e
CW
4776 return 0;
4777
1aad7ac0 4778 intel_dp->has_audio = has_audio;
f684960e
CW
4779 goto done;
4780 }
4781
e953fd7b 4782 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4783 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4784 bool old_range = intel_dp->limited_color_range;
ae4edb80 4785
55bc60db
VS
4786 switch (val) {
4787 case INTEL_BROADCAST_RGB_AUTO:
4788 intel_dp->color_range_auto = true;
4789 break;
4790 case INTEL_BROADCAST_RGB_FULL:
4791 intel_dp->color_range_auto = false;
0f2a2a75 4792 intel_dp->limited_color_range = false;
55bc60db
VS
4793 break;
4794 case INTEL_BROADCAST_RGB_LIMITED:
4795 intel_dp->color_range_auto = false;
0f2a2a75 4796 intel_dp->limited_color_range = true;
55bc60db
VS
4797 break;
4798 default:
4799 return -EINVAL;
4800 }
ae4edb80
DV
4801
4802 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4803 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4804 return 0;
4805
e953fd7b
CW
4806 goto done;
4807 }
4808
53b41837
YN
4809 if (is_edp(intel_dp) &&
4810 property == connector->dev->mode_config.scaling_mode_property) {
4811 if (val == DRM_MODE_SCALE_NONE) {
4812 DRM_DEBUG_KMS("no scaling not supported\n");
4813 return -EINVAL;
4814 }
4815
4816 if (intel_connector->panel.fitting_mode == val) {
4817 /* the eDP scaling property is not changed */
4818 return 0;
4819 }
4820 intel_connector->panel.fitting_mode = val;
4821
4822 goto done;
4823 }
4824
f684960e
CW
4825 return -EINVAL;
4826
4827done:
c0c36b94
CW
4828 if (intel_encoder->base.crtc)
4829 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4830
4831 return 0;
4832}
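
/*
 * Summary of the flow above: every property path that actually changes
 * state jumps to the "done" label, which calls intel_crtc_restore_mode()
 * on the attached crtc so the new force-audio, broadcast RGB or eDP
 * scaling value takes effect immediately; unchanged values return 0 early
 * without touching the pipe.
 */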
4833
a4fc5ed6 4834static void
73845adf 4835intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4836{
1d508706 4837 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4838
10e972d3 4839 kfree(intel_connector->detect_edid);
beb60608 4840
9cd300e0
JN
4841 if (!IS_ERR_OR_NULL(intel_connector->edid))
4842 kfree(intel_connector->edid);
4843
acd8db10
PZ
4844 /* Can't call is_edp() since the encoder may have been destroyed
4845 * already. */
4846 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4847 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4848
a4fc5ed6 4849 drm_connector_cleanup(connector);
55f78c43 4850 kfree(connector);
a4fc5ed6
KP
4851}
4852
00c09d70 4853void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4854{
da63a9f2
PZ
4855 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4856 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4857
a121f4e5 4858 intel_dp_aux_fini(intel_dp);
0e32b39c 4859 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4860 if (is_edp(intel_dp)) {
4861 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4862 /*
4863	 * vdd might still be enabled due to the delayed vdd off.
4864 * Make sure vdd is actually turned off here.
4865 */
773538e8 4866 pps_lock(intel_dp);
4be73780 4867 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4868 pps_unlock(intel_dp);
4869
01527b31
CT
4870 if (intel_dp->edp_notifier.notifier_call) {
4871 unregister_reboot_notifier(&intel_dp->edp_notifier);
4872 intel_dp->edp_notifier.notifier_call = NULL;
4873 }
bd943159 4874 }
c8bd0e49 4875 drm_encoder_cleanup(encoder);
da63a9f2 4876 kfree(intel_dig_port);
24d05927
DV
4877}
4878
07f9cd0b
ID
4879static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4880{
4881 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4882
4883 if (!is_edp(intel_dp))
4884 return;
4885
951468f3
VS
4886 /*
4887	 * vdd might still be enabled due to the delayed vdd off.
4888 * Make sure vdd is actually turned off here.
4889 */
afa4e53a 4890 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4891 pps_lock(intel_dp);
07f9cd0b 4892 edp_panel_vdd_off_sync(intel_dp);
773538e8 4893 pps_unlock(intel_dp);
07f9cd0b
ID
4894}
4895
49e6bc51
VS
4896static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4897{
4898 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4899 struct drm_device *dev = intel_dig_port->base.base.dev;
4900 struct drm_i915_private *dev_priv = dev->dev_private;
4901 enum intel_display_power_domain power_domain;
4902
4903 lockdep_assert_held(&dev_priv->pps_mutex);
4904
4905 if (!edp_have_panel_vdd(intel_dp))
4906 return;
4907
4908 /*
4909 * The VDD bit needs a power domain reference, so if the bit is
4910 * already enabled when we boot or resume, grab this reference and
4911 * schedule a vdd off, so we don't hold on to the reference
4912 * indefinitely.
4913 */
4914 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4915 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4916 intel_display_power_get(dev_priv, power_domain);
4917
4918 edp_panel_vdd_schedule_off(intel_dp);
4919}
4920
6d93c0c4
ID
4921static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4922{
49e6bc51
VS
4923 struct intel_dp *intel_dp;
4924
4925 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4926 return;
4927
4928 intel_dp = enc_to_intel_dp(encoder);
4929
4930 pps_lock(intel_dp);
4931
4932 /*
4933 * Read out the current power sequencer assignment,
4934 * in case the BIOS did something with it.
4935 */
666a4537 4936 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
49e6bc51
VS
4937 vlv_initial_power_sequencer_setup(intel_dp);
4938
4939 intel_edp_panel_vdd_sanitize(intel_dp);
4940
4941 pps_unlock(intel_dp);
6d93c0c4
ID
4942}
4943
a4fc5ed6 4944static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4945 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4946 .detect = intel_dp_detect,
beb60608 4947 .force = intel_dp_force,
a4fc5ed6 4948 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4949 .set_property = intel_dp_set_property,
2545e4a6 4950 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4951 .destroy = intel_dp_connector_destroy,
c6f95f27 4952 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4953 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4954};
4955
4956static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4957 .get_modes = intel_dp_get_modes,
4958 .mode_valid = intel_dp_mode_valid,
df0e9248 4959 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4960};
4961
a4fc5ed6 4962static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4963 .reset = intel_dp_encoder_reset,
24d05927 4964 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4965};
4966
b2c5c181 4967enum irqreturn
13cf5504
DA
4968intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4969{
4970 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4971 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4972 struct drm_device *dev = intel_dig_port->base.base.dev;
4973 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4974 enum intel_display_power_domain power_domain;
b2c5c181 4975 enum irqreturn ret = IRQ_NONE;
1c767b33 4976
0e32b39c
DA
4977 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4978 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4979
7a7f84cc
VS
4980 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4981 /*
4982 * vdd off can generate a long pulse on eDP which
4983 * would require vdd on to handle it, and thus we
4984 * would end up in an endless cycle of
4985 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4986 */
4987 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4988 port_name(intel_dig_port->port));
a8b3d52f 4989 return IRQ_HANDLED;
7a7f84cc
VS
4990 }
4991
26fbb774
VS
4992 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4993 port_name(intel_dig_port->port),
0e32b39c 4994 long_hpd ? "long" : "short");
13cf5504 4995
25f78f58 4996 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
4997 intel_display_power_get(dev_priv, power_domain);
4998
0e32b39c 4999 if (long_hpd) {
5fa836a9
MK
5000 /* indicate that we need to restart link training */
5001 intel_dp->train_set_valid = false;
2a592bec 5002
7e66bcf2
JN
5003 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5004 goto mst_fail;
0e32b39c
DA
5005
5006 if (!intel_dp_get_dpcd(intel_dp)) {
5007 goto mst_fail;
5008 }
5009
5010 intel_dp_probe_oui(intel_dp);
5011
d14e7b6d
VS
5012 if (!intel_dp_probe_mst(intel_dp)) {
5013 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5014 intel_dp_check_link_status(intel_dp);
5015 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5016 goto mst_fail;
d14e7b6d 5017 }
0e32b39c
DA
5018 } else {
5019 if (intel_dp->is_mst) {
1c767b33 5020 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5021 goto mst_fail;
5022 }
5023
5024 if (!intel_dp->is_mst) {
5b215bcf 5025 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5026 intel_dp_check_link_status(intel_dp);
5b215bcf 5027 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5028 }
5029 }
b2c5c181
DV
5030
5031 ret = IRQ_HANDLED;
5032
1c767b33 5033 goto put_power;
0e32b39c
DA
5034mst_fail:
5035	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5036 if (intel_dp->is_mst) {
5037 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5038 intel_dp->is_mst = false;
5039 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5040 }
1c767b33
ID
5041put_power:
5042 intel_display_power_put(dev_priv, power_domain);
5043
5044 return ret;
13cf5504
DA
5045}
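
/*
 * Summary of the flow above: a long HPD pulse triggers a full reprobe -
 * connection check, DPCD read, OUI probe and MST probe - falling back to a
 * link status check for non-MST sinks, while a short pulse is handled as
 * MST sideband traffic when in MST mode and as a plain link status check
 * otherwise. Failures on the long-pulse path drop the port out of MST mode
 * via the mst_fail label.
 */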
5046
477ec328 5047/* check the VBT to see whether the eDP is on another port */
5d8a7752 5048bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5049{
5050 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5051 union child_device_config *p_child;
36e83a18 5052 int i;
5d8a7752 5053 static const short port_mapping[] = {
477ec328
RV
5054 [PORT_B] = DVO_PORT_DPB,
5055 [PORT_C] = DVO_PORT_DPC,
5056 [PORT_D] = DVO_PORT_DPD,
5057 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5058 };
36e83a18 5059
53ce81a7
VS
5060 /*
5061	 * eDP is not supported on g4x, so bail out early just
5062	 * for a bit of extra safety in case the VBT is bonkers.
5063 */
5064 if (INTEL_INFO(dev)->gen < 5)
5065 return false;
5066
3b32a35b
VS
5067 if (port == PORT_A)
5068 return true;
5069
41aa3448 5070 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5071 return false;
5072
41aa3448
RV
5073 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5074 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5075
5d8a7752 5076 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5077 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5078 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5079 return true;
5080 }
5081 return false;
5082}
5083
0e32b39c 5084void
f684960e
CW
5085intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5086{
53b41837
YN
5087 struct intel_connector *intel_connector = to_intel_connector(connector);
5088
3f43c48d 5089 intel_attach_force_audio_property(connector);
e953fd7b 5090 intel_attach_broadcast_rgb_property(connector);
55bc60db 5091 intel_dp->color_range_auto = true;
53b41837
YN
5092
5093 if (is_edp(intel_dp)) {
5094 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5095 drm_object_attach_property(
5096 &connector->base,
53b41837 5097 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5098 DRM_MODE_SCALE_ASPECT);
5099 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5100 }
f684960e
CW
5101}
5102
dada1a9f
ID
5103static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5104{
5105 intel_dp->last_power_cycle = jiffies;
5106 intel_dp->last_power_on = jiffies;
5107 intel_dp->last_backlight_off = jiffies;
5108}
5109
67a54566
DV
5110static void
5111intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5112 struct intel_dp *intel_dp)
67a54566
DV
5113{
5114 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5115 struct edp_power_seq cur, vbt, spec,
5116 *final = &intel_dp->pps_delays;
b0a08bec 5117 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5118 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5119
e39b999a
VS
5120 lockdep_assert_held(&dev_priv->pps_mutex);
5121
81ddbc69
VS
5122 /* already initialized? */
5123 if (final->t11_t12 != 0)
5124 return;
5125
b0a08bec
VK
5126 if (IS_BROXTON(dev)) {
5127 /*
5128 * TODO: BXT has 2 sets of PPS registers.
5129	 * The correct register set for Broxton needs to be identified
5130	 * using the VBT; hardcoding for now.
5131 */
5132 pp_ctrl_reg = BXT_PP_CONTROL(0);
5133 pp_on_reg = BXT_PP_ON_DELAYS(0);
5134 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5135 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5136 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5137 pp_on_reg = PCH_PP_ON_DELAYS;
5138 pp_off_reg = PCH_PP_OFF_DELAYS;
5139 pp_div_reg = PCH_PP_DIVISOR;
5140 } else {
bf13e81b
JN
5141 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5142
5143 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5144 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5145 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5146 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5147 }
67a54566
DV
5148
5149 /* Workaround: Need to write PP_CONTROL with the unlock key as
5150 * the very first thing. */
b0a08bec 5151 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5152
453c5420
JB
5153 pp_on = I915_READ(pp_on_reg);
5154 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5155 if (!IS_BROXTON(dev)) {
5156 I915_WRITE(pp_ctrl_reg, pp_ctl);
5157 pp_div = I915_READ(pp_div_reg);
5158 }
67a54566
DV
5159
5160 /* Pull timing values out of registers */
5161 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5162 PANEL_POWER_UP_DELAY_SHIFT;
5163
5164 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5165 PANEL_LIGHT_ON_DELAY_SHIFT;
5166
5167 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5168 PANEL_LIGHT_OFF_DELAY_SHIFT;
5169
5170 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5171 PANEL_POWER_DOWN_DELAY_SHIFT;
5172
b0a08bec
VK
5173 if (IS_BROXTON(dev)) {
5174 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5175 BXT_POWER_CYCLE_DELAY_SHIFT;
5176 if (tmp > 0)
5177 cur.t11_t12 = (tmp - 1) * 1000;
5178 else
5179 cur.t11_t12 = 0;
5180 } else {
5181 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5182 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5183 }
67a54566
DV
5184
5185 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5186 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5187
41aa3448 5188 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5189
5190 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5191 * our hw here, which are all in 100usec. */
5192 spec.t1_t3 = 210 * 10;
5193 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5194 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5195 spec.t10 = 500 * 10;
5196 /* This one is special and actually in units of 100ms, but zero
5197 * based in the hw (so we need to add 100 ms). But the sw vbt
5198	 * table multiplies it by 1000 to make it in units of 100usec,
5199 * too. */
5200 spec.t11_t12 = (510 + 100) * 10;
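
	/*
	 * Worked example: these spec limits are in the same 100 usec units,
	 * so spec.t1_t3 = 210 * 10 = 2100 means 210 ms and
	 * spec.t11_t12 = (510 + 100) * 10 = 6100 means 610 ms.
	 */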
5201
5202 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5203 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5204
5205 /* Use the max of the register settings and vbt. If both are
5206 * unset, fall back to the spec limits. */
36b5f425 5207#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5208 spec.field : \
5209 max(cur.field, vbt.field))
5210 assign_final(t1_t3);
5211 assign_final(t8);
5212 assign_final(t9);
5213 assign_final(t10);
5214 assign_final(t11_t12);
5215#undef assign_final
5216
36b5f425 5217#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5218 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5219 intel_dp->backlight_on_delay = get_delay(t8);
5220 intel_dp->backlight_off_delay = get_delay(t9);
5221 intel_dp->panel_power_down_delay = get_delay(t10);
5222 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5223#undef get_delay
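
	/*
	 * Illustration with arbitrary values: get_delay() converts from the
	 * 100 usec units above into milliseconds, so a final->t1_t3 of 2100
	 * (210 ms) becomes DIV_ROUND_UP(2100, 10) = 210 in
	 * panel_power_up_delay.
	 */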
5224
f30d26e4
JN
5225 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5226 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5227 intel_dp->panel_power_cycle_delay);
5228
5229 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5230 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5231}
5232
5233static void
5234intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5235 struct intel_dp *intel_dp)
f30d26e4
JN
5236{
5237 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5238 u32 pp_on, pp_off, pp_div, port_sel = 0;
5239 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5240 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5241 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5242 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5243
e39b999a 5244 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5245
b0a08bec
VK
5246 if (IS_BROXTON(dev)) {
5247 /*
5248 * TODO: BXT has 2 sets of PPS registers.
5249	 * The correct register set for Broxton needs to be identified
5250	 * using the VBT; hardcoding for now.
5251 */
5252 pp_ctrl_reg = BXT_PP_CONTROL(0);
5253 pp_on_reg = BXT_PP_ON_DELAYS(0);
5254 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5255
5256 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5257 pp_on_reg = PCH_PP_ON_DELAYS;
5258 pp_off_reg = PCH_PP_OFF_DELAYS;
5259 pp_div_reg = PCH_PP_DIVISOR;
5260 } else {
bf13e81b
JN
5261 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5262
5263 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5264 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5265 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5266 }
5267
b2f19d1a
PZ
5268 /*
5269 * And finally store the new values in the power sequencer. The
5270 * backlight delays are set to 1 because we do manual waits on them. For
5271 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5272 * we'll end up waiting for the backlight off delay twice: once when we
5273 * do the manual sleep, and once when we disable the panel and wait for
5274 * the PP_STATUS bit to become zero.
5275 */
f30d26e4 5276 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5277 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5278 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5279 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5280 /* Compute the divisor for the pp clock, simply match the Bspec
5281 * formula. */
b0a08bec
VK
5282 if (IS_BROXTON(dev)) {
5283 pp_div = I915_READ(pp_ctrl_reg);
5284 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5285 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5286 << BXT_POWER_CYCLE_DELAY_SHIFT);
5287 } else {
5288 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5289 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5290 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5291 }
67a54566
DV
5292
5293 /* Haswell doesn't have any port selection bits for the panel
5294 * power sequencer any more. */
666a4537 5295 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5296 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5297 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5298 if (port == PORT_A)
a24c144c 5299 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5300 else
a24c144c 5301 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5302 }
5303
453c5420
JB
5304 pp_on |= port_sel;
5305
5306 I915_WRITE(pp_on_reg, pp_on);
5307 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5308 if (IS_BROXTON(dev))
5309 I915_WRITE(pp_ctrl_reg, pp_div);
5310 else
5311 I915_WRITE(pp_div_reg, pp_div);
67a54566 5312
67a54566 5313 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5314 I915_READ(pp_on_reg),
5315 I915_READ(pp_off_reg),
b0a08bec
VK
5316 IS_BROXTON(dev) ?
5317 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5318 I915_READ(pp_div_reg));
f684960e
CW
5319}
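
/*
 * Illustration with arbitrary values: for seq->t1_t3 == 2100 and
 * seq->t10 == 500, the pp_on/pp_off values programmed above encode a
 * 210 ms power-up delay and a 50 ms power-down delay, while both backlight
 * delay fields are forced to a single 100 usec unit because the driver
 * waits for them manually.
 */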
5320
b33a2815
VK
5321/**
5322 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5323 * @dev: DRM device
5324 * @refresh_rate: RR to be programmed
5325 *
5326 * This function gets called when refresh rate (RR) has to be changed from
5327 * one frequency to another. Switches can be between high and low RR
5328 * supported by the panel or to any other RR based on media playback (in
5329 * this case, RR value needs to be passed from user space).
5330 *
5331 * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
5332 */
96178eeb 5333static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5334{
5335 struct drm_i915_private *dev_priv = dev->dev_private;
5336 struct intel_encoder *encoder;
96178eeb
VK
5337 struct intel_digital_port *dig_port = NULL;
5338 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5339 struct intel_crtc_state *config = NULL;
439d7ac0 5340 struct intel_crtc *intel_crtc = NULL;
96178eeb 5341 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5342
5343 if (refresh_rate <= 0) {
5344 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5345 return;
5346 }
5347
96178eeb
VK
5348 if (intel_dp == NULL) {
5349 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5350 return;
5351 }
5352
1fcc9d1c 5353 /*
e4d59f6b
RV
5354 * FIXME: This needs proper synchronization with psr state for some
5355 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5356 */
439d7ac0 5357
96178eeb
VK
5358 dig_port = dp_to_dig_port(intel_dp);
5359 encoder = &dig_port->base;
723f9aab 5360 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5361
5362 if (!intel_crtc) {
5363 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5364 return;
5365 }
5366
6e3c9717 5367 config = intel_crtc->config;
439d7ac0 5368
96178eeb 5369 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5370 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5371 return;
5372 }
5373
96178eeb
VK
5374 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5375 refresh_rate)
439d7ac0
PB
5376 index = DRRS_LOW_RR;
5377
96178eeb 5378 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5379 DRM_DEBUG_KMS(
5380 "DRRS requested for previously set RR...ignoring\n");
5381 return;
5382 }
5383
5384 if (!intel_crtc->active) {
5385 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5386 return;
5387 }
5388
44395bfe 5389 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5390 switch (index) {
5391 case DRRS_HIGH_RR:
5392 intel_dp_set_m_n(intel_crtc, M1_N1);
5393 break;
5394 case DRRS_LOW_RR:
5395 intel_dp_set_m_n(intel_crtc, M2_N2);
5396 break;
5397 case DRRS_MAX_RR:
5398 default:
5399 DRM_ERROR("Unsupported refreshrate type\n");
5400 }
5401 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5402 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5403 u32 val;
a4c30b1d 5404
649636ef 5405 val = I915_READ(reg);
439d7ac0 5406 if (index > DRRS_HIGH_RR) {
666a4537 5407 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5408 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5409 else
5410 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5411 } else {
666a4537 5412 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5413 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5414 else
5415 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5416 }
5417 I915_WRITE(reg, val);
5418 }
5419
4e9ac947
VK
5420 dev_priv->drrs.refresh_rate_type = index;
5421
5422 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5423}
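
/*
 * In short: on gen8+ other than CHV the refresh rate switch is done by
 * re-pointing the pipe at the M1/N1 or M2/N2 link values via
 * intel_dp_set_m_n(); on gen7 and on VLV/CHV it is done by toggling the
 * PIPECONF_EDP_RR_MODE_SWITCH (or PIPECONF_EDP_RR_MODE_SWITCH_VLV) bit in
 * the transcoder's PIPECONF register.
 */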
5424
b33a2815
VK
5425/**
5426 * intel_edp_drrs_enable - init drrs struct if supported
5427 * @intel_dp: DP struct
5428 *
5429 * Initializes frontbuffer_bits and drrs.dp
5430 */
c395578e
VK
5431void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5432{
5433 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5434 struct drm_i915_private *dev_priv = dev->dev_private;
5435 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5436 struct drm_crtc *crtc = dig_port->base.base.crtc;
5437 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5438
5439 if (!intel_crtc->config->has_drrs) {
5440 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5441 return;
5442 }
5443
5444 mutex_lock(&dev_priv->drrs.mutex);
5445 if (WARN_ON(dev_priv->drrs.dp)) {
5446 DRM_ERROR("DRRS already enabled\n");
5447 goto unlock;
5448 }
5449
5450 dev_priv->drrs.busy_frontbuffer_bits = 0;
5451
5452 dev_priv->drrs.dp = intel_dp;
5453
5454unlock:
5455 mutex_unlock(&dev_priv->drrs.mutex);
5456}
5457
b33a2815
VK
5458/**
5459 * intel_edp_drrs_disable - Disable DRRS
5460 * @intel_dp: DP struct
5461 *
5462 */
c395578e
VK
5463void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5464{
5465 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5466 struct drm_i915_private *dev_priv = dev->dev_private;
5467 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5468 struct drm_crtc *crtc = dig_port->base.base.crtc;
5469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5470
5471 if (!intel_crtc->config->has_drrs)
5472 return;
5473
5474 mutex_lock(&dev_priv->drrs.mutex);
5475 if (!dev_priv->drrs.dp) {
5476 mutex_unlock(&dev_priv->drrs.mutex);
5477 return;
5478 }
5479
5480 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5481 intel_dp_set_drrs_state(dev_priv->dev,
5482 intel_dp->attached_connector->panel.
5483 fixed_mode->vrefresh);
5484
5485 dev_priv->drrs.dp = NULL;
5486 mutex_unlock(&dev_priv->drrs.mutex);
5487
5488 cancel_delayed_work_sync(&dev_priv->drrs.work);
5489}
5490
4e9ac947
VK
5491static void intel_edp_drrs_downclock_work(struct work_struct *work)
5492{
5493 struct drm_i915_private *dev_priv =
5494 container_of(work, typeof(*dev_priv), drrs.work.work);
5495 struct intel_dp *intel_dp;
5496
5497 mutex_lock(&dev_priv->drrs.mutex);
5498
5499 intel_dp = dev_priv->drrs.dp;
5500
5501 if (!intel_dp)
5502 goto unlock;
5503
439d7ac0 5504 /*
4e9ac947
VK
5505	 * The delayed work can race with an invalidate, hence we need to
5506 * recheck.
439d7ac0
PB
5507 */
5508
4e9ac947
VK
5509 if (dev_priv->drrs.busy_frontbuffer_bits)
5510 goto unlock;
439d7ac0 5511
4e9ac947
VK
5512 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5513 intel_dp_set_drrs_state(dev_priv->dev,
5514 intel_dp->attached_connector->panel.
5515 downclock_mode->vrefresh);
439d7ac0 5516
4e9ac947 5517unlock:
4e9ac947 5518 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5519}
5520
b33a2815 5521/**
0ddfd203 5522 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5523 * @dev: DRM device
5524 * @frontbuffer_bits: frontbuffer plane tracking bits
5525 *
0ddfd203
R
5526 * This function gets called every time rendering on the given planes starts.
5527 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5528 *
5529 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5530 */
a93fad0f
VK
5531void intel_edp_drrs_invalidate(struct drm_device *dev,
5532 unsigned frontbuffer_bits)
5533{
5534 struct drm_i915_private *dev_priv = dev->dev_private;
5535 struct drm_crtc *crtc;
5536 enum pipe pipe;
5537
9da7d693 5538 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5539 return;
5540
88f933a8 5541 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5542
a93fad0f 5543 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5544 if (!dev_priv->drrs.dp) {
5545 mutex_unlock(&dev_priv->drrs.mutex);
5546 return;
5547 }
5548
a93fad0f
VK
5549 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5550 pipe = to_intel_crtc(crtc)->pipe;
5551
c1d038c6
DV
5552 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5553 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5554
0ddfd203 5555 /* invalidate means busy screen hence upclock */
c1d038c6 5556 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5557 intel_dp_set_drrs_state(dev_priv->dev,
5558 dev_priv->drrs.dp->attached_connector->panel.
5559 fixed_mode->vrefresh);
a93fad0f 5560
a93fad0f
VK
5561 mutex_unlock(&dev_priv->drrs.mutex);
5562}
5563
b33a2815 5564/**
0ddfd203 5565 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5566 * @dev: DRM device
5567 * @frontbuffer_bits: frontbuffer plane tracking bits
5568 *
0ddfd203
R
5569 * This function gets called every time rendering on the given planes has
5570 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5571 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again
5572 * if no other planes are dirty.
b33a2815
VK
5573 *
5574 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5575 */
a93fad0f
VK
5576void intel_edp_drrs_flush(struct drm_device *dev,
5577 unsigned frontbuffer_bits)
5578{
5579 struct drm_i915_private *dev_priv = dev->dev_private;
5580 struct drm_crtc *crtc;
5581 enum pipe pipe;
5582
9da7d693 5583 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5584 return;
5585
88f933a8 5586 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5587
a93fad0f 5588 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5589 if (!dev_priv->drrs.dp) {
5590 mutex_unlock(&dev_priv->drrs.mutex);
5591 return;
5592 }
5593
a93fad0f
VK
5594 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5595 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5596
5597 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5598 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5599
0ddfd203 5600 /* flush means busy screen hence upclock */
c1d038c6 5601 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5602 intel_dp_set_drrs_state(dev_priv->dev,
5603 dev_priv->drrs.dp->attached_connector->panel.
5604 fixed_mode->vrefresh);
5605
5606 /*
5607	 * flush also means no more activity, hence schedule the downclock if all
5608	 * other fbs are quiescent too
5609 */
5610 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5611 schedule_delayed_work(&dev_priv->drrs.work,
5612 msecs_to_jiffies(1000));
5613 mutex_unlock(&dev_priv->drrs.mutex);
5614}
5615
b33a2815
VK
5616/**
5617 * DOC: Display Refresh Rate Switching (DRRS)
5618 *
5619 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5620 * which enables switching between low and high refresh rates,
5621 * dynamically, based on the usage scenario. This feature is applicable
5622 * for internal panels.
5623 *
5624 * Indication that the panel supports DRRS is given by the panel EDID, which
5625 * would list multiple refresh rates for one resolution.
5626 *
5627 * DRRS is of 2 types - static and seamless.
5628 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5629 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5630 * Seamless DRRS involves changing RR without any visual effect to the user
5631 * and can be used during normal system usage. This is done by programming
5632 * certain registers.
5633 *
5634 * Support for static/seamless DRRS may be indicated in the VBT based on
5635 * inputs from the panel spec.
5636 *
5637 * DRRS saves power by switching to low RR based on usage scenarios.
5638 *
5639 * eDP DRRS:-
5640 * The implementation is based on frontbuffer tracking implementation.
5641 * When there is a disturbance on the screen triggered by user activity or a
5642 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5643 * When there is no movement on screen, after a timeout of 1 second, a switch
5644 * to low RR is made.
5645 * For integration with frontbuffer tracking code,
5646 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5647 *
5648 * DRRS can be further extended to support other internal panels and also
5649 * the scenario of video playback wherein RR is set based on the rate
5650 * requested by userspace.
5651 */
5652
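/*
 * Rough usage sketch (an assumed caller in the frontbuffer tracking code,
 * not copied from it): code dirtying planes on a pipe would do roughly
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... render to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * invalidate() forces the high refresh rate while the screen is busy and
 * flush() re-arms the 1 second idleness timer that eventually switches
 * back to the low refresh rate.
 */
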
5653/**
5654 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5655 * @intel_connector: eDP connector
5656 * @fixed_mode: preferred mode of panel
5657 *
5658 * This function is called only once at driver load to initialize basic
5659 * DRRS stuff.
5660 *
5661 * Returns:
5662 * Downclock mode if panel supports it, else return NULL.
5663 * DRRS support is determined by the presence of downclock mode (apart
5664 * from VBT setting).
5665 */
4f9db5b5 5666static struct drm_display_mode *
96178eeb
VK
5667intel_dp_drrs_init(struct intel_connector *intel_connector,
5668 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5669{
5670 struct drm_connector *connector = &intel_connector->base;
96178eeb 5671 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5672 struct drm_i915_private *dev_priv = dev->dev_private;
5673 struct drm_display_mode *downclock_mode = NULL;
5674
9da7d693
DV
5675 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5676 mutex_init(&dev_priv->drrs.mutex);
5677
4f9db5b5
PB
5678 if (INTEL_INFO(dev)->gen <= 6) {
5679 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5680 return NULL;
5681 }
5682
5683 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5684 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5685 return NULL;
5686 }
5687
5688 downclock_mode = intel_find_panel_downclock
5689 (dev, fixed_mode, connector);
5690
5691 if (!downclock_mode) {
a1d26342 5692 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5693 return NULL;
5694 }
5695
96178eeb 5696 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5697
96178eeb 5698 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5699 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5700 return downclock_mode;
5701}
5702
ed92f0b2 5703static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5704 struct intel_connector *intel_connector)
ed92f0b2
PZ
5705{
5706 struct drm_connector *connector = &intel_connector->base;
5707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5708 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5709 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5710 struct drm_i915_private *dev_priv = dev->dev_private;
5711 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5712 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5713 bool has_dpcd;
5714 struct drm_display_mode *scan;
5715 struct edid *edid;
6517d273 5716 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5717
5718 if (!is_edp(intel_dp))
5719 return true;
5720
49e6bc51
VS
5721 pps_lock(intel_dp);
5722 intel_edp_panel_vdd_sanitize(intel_dp);
5723 pps_unlock(intel_dp);
63635217 5724
ed92f0b2 5725 /* Cache DPCD and EDID for edp. */
ed92f0b2 5726 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5727
5728 if (has_dpcd) {
5729 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5730 dev_priv->no_aux_handshake =
5731 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5732 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5733 } else {
5734 /* if this fails, presume the device is a ghost */
5735 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5736 return false;
5737 }
5738
5739 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5740 pps_lock(intel_dp);
36b5f425 5741 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5742 pps_unlock(intel_dp);
ed92f0b2 5743
060c8778 5744 mutex_lock(&dev->mode_config.mutex);
0b99836f 5745 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5746 if (edid) {
5747 if (drm_add_edid_modes(connector, edid)) {
5748 drm_mode_connector_update_edid_property(connector,
5749 edid);
5750 drm_edid_to_eld(connector, edid);
5751 } else {
5752 kfree(edid);
5753 edid = ERR_PTR(-EINVAL);
5754 }
5755 } else {
5756 edid = ERR_PTR(-ENOENT);
5757 }
5758 intel_connector->edid = edid;
5759
5760 /* prefer fixed mode from EDID if available */
5761 list_for_each_entry(scan, &connector->probed_modes, head) {
5762 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5763 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5764 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5765 intel_connector, fixed_mode);
ed92f0b2
PZ
5766 break;
5767 }
5768 }
5769
5770 /* fallback to VBT if available for eDP */
5771 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5772 fixed_mode = drm_mode_duplicate(dev,
5773 dev_priv->vbt.lfp_lvds_vbt_mode);
5774 if (fixed_mode)
5775 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5776 }
060c8778 5777 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5778
666a4537 5779 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
01527b31
CT
5780 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5781 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5782
5783 /*
5784 * Figure out the current pipe for the initial backlight setup.
5785 * If the current pipe isn't valid, try the PPS pipe, and if that
5786 * fails just assume pipe A.
5787 */
5788 if (IS_CHERRYVIEW(dev))
5789 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5790 else
5791 pipe = PORT_TO_PIPE(intel_dp->DP);
5792
5793 if (pipe != PIPE_A && pipe != PIPE_B)
5794 pipe = intel_dp->pps_pipe;
5795
5796 if (pipe != PIPE_A && pipe != PIPE_B)
5797 pipe = PIPE_A;
5798
5799 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5800 pipe_name(pipe));
01527b31
CT
5801 }
5802
4f9db5b5 5803 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5804 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5805 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5806
5807 return true;
5808}
5809
16c25533 5810bool
f0fec3f2
PZ
5811intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5812 struct intel_connector *intel_connector)
a4fc5ed6 5813{
f0fec3f2
PZ
5814 struct drm_connector *connector = &intel_connector->base;
5815 struct intel_dp *intel_dp = &intel_dig_port->dp;
5816 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5817 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5818 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5819 enum port port = intel_dig_port->port;
a121f4e5 5820 int type, ret;
a4fc5ed6 5821
a4a5d2f8
VS
5822 intel_dp->pps_pipe = INVALID_PIPE;
5823
ec5b01dd 5824 /* intel_dp vfuncs */
b6b5e383
DL
5825 if (INTEL_INFO(dev)->gen >= 9)
5826 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
666a4537 5827 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
ec5b01dd
DL
5828 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5829 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5830 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5831 else if (HAS_PCH_SPLIT(dev))
5832 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5833 else
5834 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5835
b9ca5fad
DL
5836 if (INTEL_INFO(dev)->gen >= 9)
5837 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5838 else
5839 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5840
ad64217b
ACO
5841 if (HAS_DDI(dev))
5842 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5843
0767935e
DV
5844 /* Preserve the current hw state. */
5845 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5846 intel_dp->attached_connector = intel_connector;
3d3dc149 5847
3b32a35b 5848 if (intel_dp_is_edp(dev, port))
b329530c 5849 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5850 else
5851 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5852
f7d24902
ID
5853 /*
5854 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5855 * for DP the encoder type can be set by the caller to
5856 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5857 */
5858 if (type == DRM_MODE_CONNECTOR_eDP)
5859 intel_encoder->type = INTEL_OUTPUT_EDP;
5860
c17ed5b5 5861 /* eDP only on port B and/or C on vlv/chv */
666a4537
WB
5862 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5863 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
c17ed5b5
VS
5864 return false;
5865
e7281eab
ID
5866 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5867 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5868 port_name(port));
5869
b329530c 5870 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5871 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5872
a4fc5ed6
KP
5873 connector->interlace_allowed = true;
5874 connector->doublescan_allowed = 0;
5875
f0fec3f2 5876 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5877 edp_panel_vdd_work);
a4fc5ed6 5878
df0e9248 5879 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5880 drm_connector_register(connector);
a4fc5ed6 5881
affa9354 5882 if (HAS_DDI(dev))
bcbc889b
PZ
5883 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5884 else
5885 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5886 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5887
0b99836f 5888 /* Set up the hotplug pin. */
ab9d7c30
PZ
5889 switch (port) {
5890 case PORT_A:
1d843f9d 5891 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5892 break;
5893 case PORT_B:
1d843f9d 5894 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5895 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5896 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5897 break;
5898 case PORT_C:
1d843f9d 5899 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5900 break;
5901 case PORT_D:
1d843f9d 5902 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5903 break;
26951caf
XZ
5904 case PORT_E:
5905 intel_encoder->hpd_pin = HPD_PORT_E;
5906 break;
ab9d7c30 5907 default:
ad1c0b19 5908 BUG();
5eb08b69
ZW
5909 }
5910
dada1a9f 5911 if (is_edp(intel_dp)) {
773538e8 5912 pps_lock(intel_dp);
1e74a324 5913 intel_dp_init_panel_power_timestamps(intel_dp);
666a4537 5914 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
a4a5d2f8 5915 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5916 else
36b5f425 5917 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5918 pps_unlock(intel_dp);
dada1a9f 5919 }
0095e6dc 5920
a121f4e5
VS
5921 ret = intel_dp_aux_init(intel_dp, intel_connector);
5922 if (ret)
5923 goto fail;
c1f05264 5924
0e32b39c 5925 /* init MST on ports that can support it */
0c9b3715
JN
5926 if (HAS_DP_MST(dev) &&
5927 (port == PORT_B || port == PORT_C || port == PORT_D))
5928 intel_dp_mst_encoder_init(intel_dig_port,
5929 intel_connector->base.base.id);
0e32b39c 5930
36b5f425 5931 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
5932 intel_dp_aux_fini(intel_dp);
5933 intel_dp_mst_encoder_cleanup(intel_dig_port);
5934 goto fail;
b2f246a8 5935 }
32f9d658 5936
f684960e
CW
5937 intel_dp_add_properties(intel_dp, connector);
5938
a4fc5ed6
KP
5939 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5940 * 0xd. Failure to do so will result in spurious interrupts being
5941 * generated on the port when a cable is not attached.
5942 */
5943 if (IS_G4X(dev) && !IS_GM45(dev)) {
5944 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5945 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5946 }
16c25533 5947
aa7471d2
JN
5948 i915_debugfs_connector_add(connector);
5949
16c25533 5950 return true;
a121f4e5
VS
5951
5952fail:
5953 if (is_edp(intel_dp)) {
5954 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5955 /*
5956	 * vdd might still be enabled due to the delayed vdd off.
5957 * Make sure vdd is actually turned off here.
5958 */
5959 pps_lock(intel_dp);
5960 edp_panel_vdd_off_sync(intel_dp);
5961 pps_unlock(intel_dp);
5962 }
5963 drm_connector_unregister(connector);
5964 drm_connector_cleanup(connector);
5965
5966 return false;
a4fc5ed6 5967}
f0fec3f2
PZ
5968
5969void
f0f59a00
VS
5970intel_dp_init(struct drm_device *dev,
5971 i915_reg_t output_reg, enum port port)
f0fec3f2 5972{
13cf5504 5973 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5974 struct intel_digital_port *intel_dig_port;
5975 struct intel_encoder *intel_encoder;
5976 struct drm_encoder *encoder;
5977 struct intel_connector *intel_connector;
5978
b14c5679 5979 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5980 if (!intel_dig_port)
5981 return;
5982
08d9bc92 5983 intel_connector = intel_connector_alloc();
11aee0f6
SM
5984 if (!intel_connector)
5985 goto err_connector_alloc;
f0fec3f2
PZ
5986
5987 intel_encoder = &intel_dig_port->base;
5988 encoder = &intel_encoder->base;
5989
893da0c9
SM
5990 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5991 DRM_MODE_ENCODER_TMDS))
5992 goto err_encoder_init;
f0fec3f2 5993
5bfe2ac0 5994 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5995 intel_encoder->disable = intel_disable_dp;
00c09d70 5996 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5997 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5998 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5999 if (IS_CHERRYVIEW(dev)) {
9197c88b 6000 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6001 intel_encoder->pre_enable = chv_pre_enable_dp;
6002 intel_encoder->enable = vlv_enable_dp;
580d3811 6003 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6004 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6005 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6006 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6007 intel_encoder->pre_enable = vlv_pre_enable_dp;
6008 intel_encoder->enable = vlv_enable_dp;
49277c31 6009 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6010 } else {
ecff4f3b
JN
6011 intel_encoder->pre_enable = g4x_pre_enable_dp;
6012 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6013 if (INTEL_INFO(dev)->gen >= 5)
6014 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6015 }
f0fec3f2 6016
174edf1f 6017 intel_dig_port->port = port;
f0fec3f2
PZ
6018 intel_dig_port->dp.output_reg = output_reg;
6019
00c09d70 6020 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6021 if (IS_CHERRYVIEW(dev)) {
6022 if (port == PORT_D)
6023 intel_encoder->crtc_mask = 1 << 2;
6024 else
6025 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6026 } else {
6027 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6028 }
bc079e8b 6029 intel_encoder->cloneable = 0;
f0fec3f2 6030
13cf5504 6031 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6032 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6033
11aee0f6
SM
6034 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6035 goto err_init_connector;
6036
6037 return;
6038
6039err_init_connector:
6040 drm_encoder_cleanup(encoder);
893da0c9 6041err_encoder_init:
11aee0f6
SM
6042 kfree(intel_connector);
6043err_connector_alloc:
6044 kfree(intel_dig_port);
6045
6046 return;
f0fec3f2 6047}
0e32b39c
DA
6048
6049void intel_dp_mst_suspend(struct drm_device *dev)
6050{
6051 struct drm_i915_private *dev_priv = dev->dev_private;
6052 int i;
6053
6054 /* disable MST */
6055 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6056 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6057 if (!intel_dig_port)
6058 continue;
6059
6060 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6061 if (!intel_dig_port->dp.can_mst)
6062 continue;
6063 if (intel_dig_port->dp.is_mst)
6064 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6065 }
6066 }
6067}
6068
6069void intel_dp_mst_resume(struct drm_device *dev)
6070{
6071 struct drm_i915_private *dev_priv = dev->dev_private;
6072 int i;
6073
6074 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6075 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6076 if (!intel_dig_port)
6077 continue;
6078 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6079 int ret;
6080
6081 if (!intel_dig_port->dp.can_mst)
6082 continue;
6083
6084 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6085 if (ret != 0) {
6086 intel_dp_check_mst_status(&intel_dig_port->dp);
6087 }
6088 }
6089 }
6090}