drm/i915: Don't do edp panel detection in g4x_dp_detect()
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairing of a DP link rate (in kHz) with the PLL dividers that produce it. */
struct dp_link_dpll {
	int clock;		/* link rate in kHz (e.g. 162000 = 1.62 GHz) */
	struct dpll dpll;	/* divider settings that generate this rate */
};

/* Gen4 (i965-class) DPLL settings for the two DP 1.1 link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (Ironlake+) DPLL settings for the same two link rates. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Link rates (kHz) each platform's source can drive; sorted ascending. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Standard DP 1.2 rates: RBR, HBR, HBR2. */
static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
e0fce78f
VS
/* Mask (bits 0-3) of the DP lanes NOT used by a @lane_count configuration. */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1 << lane_count) - 1;

	return ~used_lanes & 0xf;
}
137
ed4e9c1d
VS
/*
 * Read the sink's advertised max link bandwidth code from the cached DPCD
 * and sanitize it to one of the three rates DP 1.2 defines.  Unknown values
 * are clamped down to 1.62 Gbps (with a one-time warning) rather than
 * trusted, since driving an unsupported rate would fail link training.
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
156
eeb6324d
PZ
/*
 * Max usable lane count: the minimum of what the source port can drive
 * and what the sink advertises in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/*
	 * On DDI platforms port A may have only 2 lanes wired up
	 * (DDI_A_4_LANES clear in the BIOS-saved port bits).
	 */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
172
cd9dde44
AJ
173/*
174 * The units on the numbers in the next two are... bizarre. Examples will
175 * make it clearer; this one parallels an example in the eDP spec.
176 *
177 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178 *
179 * 270000 * 1 * 8 / 10 == 216000
180 *
181 * The actual data capacity of that configuration is 2.16Gbit/s, so the
182 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
183 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184 * 119000. At 18bpp that's 2142000 kilobits per second.
185 *
186 * Thus the strange-looking division by 10 in intel_dp_link_required, to
187 * get the result in decakilobits instead of kilobits.
188 */
189
/*
 * Bandwidth needed for a mode, in decakilobits/s (see the unit discussion
 * in the comment block above).  Rounds up so a mode is never under-budgeted.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbits_per_sec = pixel_clock * bpp;

	return (kbits_per_sec + 9) / 10;
}
195
fe27d53e
DA
/*
 * Usable payload bandwidth of a link, in decakilobits/s: 8b/10b channel
 * coding means only 80% of the raw symbol rate carries data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
201
/*
 * Connector ->mode_valid hook: reject modes the link/panel cannot carry.
 * For eDP the panel's fixed mode bounds the usable resolution and supplies
 * the real pixel clock.  Bandwidth is checked at 18bpp (the minimum the
 * driver may fall back to), so anything that fails here can never work.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		/* Panels can't scale up beyond their native resolution. */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel always runs at its native clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	/* Hardware minimum dotclock: 10 MHz. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Double-clocked modes are not supported on DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
239
/*
 * Pack up to 4 bytes of AUX payload into one 32-bit data-register value,
 * MSB first (src[0] lands in bits 31:24).  Extra input bytes are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int count = src_bytes > 4 ? 4 : src_bytes;
	uint32_t packed = 0;
	int shift = 24;
	int i;

	for (i = 0; i < count; i++, shift -= 8)
		packed |= (uint32_t)src[i] << shift;

	return packed;
}
251
/*
 * Unpack up to 4 AUX payload bytes from a 32-bit data-register value,
 * MSB first (bits 31:24 become dst[0]).  Inverse of intel_dp_pack_aux().
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < count; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
260
bf13e81b
JN
261static void
262intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 263 struct intel_dp *intel_dp);
bf13e81b
JN
264static void
265intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 266 struct intel_dp *intel_dp);
bf13e81b 267
773538e8
VS
/*
 * Acquire the panel power sequencer mutex, taking an AUX power domain
 * reference first.  The ordering matters: the power domain must be
 * grabbed while NOT holding pps_mutex (see vlv_power_sequencer_reset()
 * for the deadlock this avoids).  Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
285
/*
 * Release pps_mutex and then drop the AUX power domain reference taken
 * in pps_lock() — the reverse order of acquisition.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
299
961a0db0
VS
/*
 * Force the just-assigned VLV/CHV panel power sequencer to lock onto this
 * port by briefly enabling and disabling the port.  Without this dance not
 * even the VDD force bit works.  Requires the port to be currently off;
 * temporarily enables the pipe's DPLL if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the PHY channel may need to be powered up first. */
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
369
bf13e81b
JN
/*
 * Return the pipe whose panel power sequencer serves this eDP port,
 * assigning (and stealing, if necessary) a free one on first use and
 * kicking it so it locks onto the port.  Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
433
6491ab27
VS
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Is this pipe's panel power currently on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD currently forced on for this pipe's sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe — last-resort fallback predicate. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
bf13e81b 454
/*
 * Scan pipes A/B for a power sequencer whose port-select field already
 * points at @port and which satisfies @pipe_check; INVALID_PIPE if none.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
477
/*
 * At driver init, adopt whichever power sequencer the BIOS left attached
 * to this port, preferring (in order) one with the panel powered, one with
 * VDD forced on, then any with matching port select.  If none matches,
 * leave pps_pipe INVALID and let vlv_power_sequencer_pipe() assign later.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
514
773538e8
VS
/*
 * Invalidate every eDP encoder's cached pps_pipe (VLV only), e.g. after a
 * power well cycle clobbered the sequencer state.  Deliberately does not
 * take pps_mutex — see the comment below for the lock-ordering reason.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
543
f0f59a00
VS
/* Panel power CONTROL register for this eDP port, per platform. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		/* VLV/CHV: per-pipe register; may assign a sequencer. */
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
556
f0f59a00
VS
/* Panel power STATUS register for this eDP port, per platform. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		/* VLV/CHV: per-pipe register; may assign a sequencer. */
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
569
01527b31
CT
/*
 * Reboot notifier handler: shut down panel power so the T12 power-cycle
 * timing is guaranteed across the reboot.  Only applicable when the panel's
 * PM state is not otherwise tracked (VLV manual sequencing).
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Let the full power-cycle delay elapse before rebooting. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
605
/* Is panel power currently on?  Caller must hold pps_mutex. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No sequencer assigned yet on VLV means the panel can't be on. */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
619
4be73780 620static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 621{
30add22d 622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
623 struct drm_i915_private *dev_priv = dev->dev_private;
624
e39b999a
VS
625 lockdep_assert_held(&dev_priv->pps_mutex);
626
9a42356b
VS
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
629 return false;
630
773538e8 631 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
632}
633
9b984dae
KP
/*
 * Sanity check before AUX traffic on an eDP port: warn loudly if neither
 * panel power nor forced VDD is up, since the AUX transaction would fail.
 * No-op for regular DP.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
650
9ee32fea
DV
/*
 * Wait (up to 10ms) for the AUX channel to clear its SEND_BUSY bit,
 * either interrupt-driven or by atomic polling, and return the final
 * channel-control status.  Logs an error if the hardware never signals.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
674
/*
 * AUX clock divider for gen4-class hardware.  Only one divider to try
 * (index 0); further indices return 0 to end the retry loop in
 * intel_dp_aux_ch().
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
686
/*
 * AUX clock divider for ILK-class hardware: port A's AUX runs off cdclk,
 * the other ports off the PCH raw clock.  Single divider (index 0 only).
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
703
/*
 * AUX clock divider for HSW/BDW.  Port A runs off cdclk; on non-ULT HSW
 * with an LPT PCH two hardcoded workaround dividers (63, 72) are tried in
 * turn; otherwise the PCH raw clock is used.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
725
ec5b01dd
DL
/* VLV uses a single fixed AUX clock divider of 100 (index 0 only). */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
730
b6b5e383
DL
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
740
5ed12a19
DL
/*
 * Build the AUX_CH_CTL value to start a transaction on pre-SKL hardware:
 * busy/done/interrupt bits, error bits to clear, per-platform timeout and
 * precharge, message size, and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW port A needs the longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
770
b9ca5fad
DL
/*
 * SKL variant of the AUX_CH_CTL setup: no clock divider or precharge field
 * (hardware derives timing from CDCLK); uses a fixed 1600us timeout and a
 * 32-cycle sync pulse.  The divider argument is unused by design.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
785
b84a1cf8
RV
/*
 * Perform one raw AUX channel transaction: write up to 20 bytes from
 * @send, read up to @recv_size bytes into @recv.  Returns the number of
 * bytes received, or a negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 * Takes the PPS lock, forces VDD on for eDP if needed, and pins PM QoS
 * to minimum latency for the duration — AUX is very latency-sensitive.
 * Retries across every available clock divider, up to 5 attempts each
 * (DP spec requires at least 3).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the warning: only log when the stuck status changes. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
936
a6c8aff0
JN
/* AUX message header: 3 address/request bytes, +1 length byte for non-bare
 * (data-carrying) transactions. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/*
 * drm_dp_aux transfer hook: marshal a drm_dp_aux_msg into the raw AUX
 * wire format, run it through intel_dp_aux_ch(), and unpack the reply.
 *
 * Returns the payload size on success or a negative error code. The
 * caller is expected to inspect msg->reply for NACK/DEFER before
 * trusting any copied data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	/* 20 bytes = 4-byte header + 16-byte max AUX payload */
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request nibble + 20-bit address,
	 * then length-1 (AUX encodes the payload length minus one). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte carries the AUX reply code. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* reply code byte + requested payload */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1008
f0f59a00
VS
1009static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1010 enum port port)
da00bdcf
VS
1011{
1012 switch (port) {
1013 case PORT_B:
1014 case PORT_C:
1015 case PORT_D:
1016 return DP_AUX_CH_CTL(port);
1017 default:
1018 MISSING_CASE(port);
1019 return DP_AUX_CH_CTL(PORT_B);
1020 }
1021}
1022
f0f59a00
VS
1023static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1024 enum port port, int index)
330e20ec
VS
1025{
1026 switch (port) {
1027 case PORT_B:
1028 case PORT_C:
1029 case PORT_D:
1030 return DP_AUX_CH_DATA(port, index);
1031 default:
1032 MISSING_CASE(port);
1033 return DP_AUX_CH_DATA(PORT_B, index);
1034 }
1035}
1036
f0f59a00
VS
1037static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1038 enum port port)
da00bdcf
VS
1039{
1040 switch (port) {
1041 case PORT_A:
1042 return DP_AUX_CH_CTL(port);
1043 case PORT_B:
1044 case PORT_C:
1045 case PORT_D:
1046 return PCH_DP_AUX_CH_CTL(port);
1047 default:
1048 MISSING_CASE(port);
1049 return DP_AUX_CH_CTL(PORT_A);
1050 }
1051}
1052
f0f59a00
VS
1053static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1054 enum port port, int index)
330e20ec
VS
1055{
1056 switch (port) {
1057 case PORT_A:
1058 return DP_AUX_CH_DATA(port, index);
1059 case PORT_B:
1060 case PORT_C:
1061 case PORT_D:
1062 return PCH_DP_AUX_CH_DATA(port, index);
1063 default:
1064 MISSING_CASE(port);
1065 return DP_AUX_CH_DATA(PORT_A, index);
1066 }
1067}
1068
da00bdcf
VS
/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	/* Map the VBT's DP_AUX_* channel code to the port whose AUX
	 * registers port E borrows. A bogus VBT value is flagged and
	 * defaults to port A. */
	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}
1092
f0f59a00
VS
1093static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1094 enum port port)
da00bdcf
VS
1095{
1096 if (port == PORT_E)
1097 port = skl_porte_aux_port(dev_priv);
1098
1099 switch (port) {
1100 case PORT_A:
1101 case PORT_B:
1102 case PORT_C:
1103 case PORT_D:
1104 return DP_AUX_CH_CTL(port);
1105 default:
1106 MISSING_CASE(port);
1107 return DP_AUX_CH_CTL(PORT_A);
1108 }
1109}
1110
f0f59a00
VS
1111static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1112 enum port port, int index)
330e20ec
VS
1113{
1114 if (port == PORT_E)
1115 port = skl_porte_aux_port(dev_priv);
1116
1117 switch (port) {
1118 case PORT_A:
1119 case PORT_B:
1120 case PORT_C:
1121 case PORT_D:
1122 return DP_AUX_CH_DATA(port, index);
1123 default:
1124 MISSING_CASE(port);
1125 return DP_AUX_CH_DATA(PORT_A, index);
1126 }
1127}
1128
f0f59a00
VS
/* Dispatch to the platform-appropriate AUX control register lookup. */
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}
1139
f0f59a00
VS
/* Dispatch to the platform-appropriate AUX data register lookup. */
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}
1150
/*
 * Cache this port's AUX control and data register offsets in the
 * intel_dp so the hot AUX transfer path avoids repeated lookups.
 */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1161
/* Tear down the AUX channel: unregister from the DP helper core first,
 * then release the kasprintf()'d name allocated in intel_dp_aux_init(). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1168
/*
 * Register the DP AUX channel for this port: cache register offsets,
 * name the channel after the port, register with the DRM DP helper
 * core, and link the i2c adapter into the connector's sysfs directory.
 *
 * Returns 0 on success or a negative error code; on any failure all
 * partially-acquired resources are released before returning.
 */
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* aux not registered yet, so only the name needs freeing */
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* undoes both the registration and the name allocation */
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}
1210
80f65de3
ID
/*
 * Connector unregister hook: remove the AUX/i2c sysfs link created by
 * intel_dp_aux_init() before the generic connector teardown. MST
 * connectors never got the link, so skip them.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1221
/*
 * Program the crtc state to drive eDP from DPLL0 on SKL: select the
 * DPLL0 link-rate override matching the computed port clock.
 *
 * NOTE(review): port clocks not listed in the switch leave the link
 * rate field of ctrl1 at zero (only the override bit set) — presumably
 * the rate computation never produces such a clock; verify.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	/* port_clock is in 10 kHz units of the symbol rate; the /2 maps
	 * it onto the DPLL_CTRL1 link-rate table values below. */
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1267
/*
 * Select the fixed LCPLL output matching the computed DP port clock on
 * HSW/BDW. Like skl_edp_set_pll_config(), unlisted clocks leave
 * ddi_pll_sel at zero after the memset — assumed unreachable for the
 * rates the link computation can produce.
 */
void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	/* port_clock/2 maps the symbol rate onto the LCPLL frequencies */
	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}
1286
fc0f8e25 1287static int
12f6a2e2 1288intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1289{
94ca719e
VS
1290 if (intel_dp->num_sink_rates) {
1291 *sink_rates = intel_dp->sink_rates;
1292 return intel_dp->num_sink_rates;
fc0f8e25 1293 }
12f6a2e2
VS
1294
1295 *sink_rates = default_rates;
1296
1297 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1298}
1299
e588fa18 1300bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1301{
e588fa18
ACO
1302 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1303 struct drm_device *dev = dig_port->base.base.dev;
1304
ed63baaf 1305 /* WaDisableHBR2:skl */
e87a005d 1306 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1307 return false;
1308
1309 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1310 (INTEL_INFO(dev)->gen >= 9))
1311 return true;
1312 else
1313 return false;
1314}
1315
/*
 * Report the source's supported link rates: point *source_rates at the
 * platform's rate table and return the number of usable entries.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
1340
c6bb3538
DV
/*
 * Pre-DDI platforms: look up the hand-tuned DPLL divisor values for
 * the chosen port clock and stash them in the crtc state. Platforms
 * not matched below (DDI ones) leave clock_set false and compute the
 * PLL state elsewhere.
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
1373
2ecae76a
VS
1374static int intersect_rates(const int *source_rates, int source_len,
1375 const int *sink_rates, int sink_len,
94ca719e 1376 int *common_rates)
a8f3ef61
SJ
1377{
1378 int i = 0, j = 0, k = 0;
1379
a8f3ef61
SJ
1380 while (i < source_len && j < sink_len) {
1381 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1382 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1383 return k;
94ca719e 1384 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1385 ++k;
1386 ++i;
1387 ++j;
1388 } else if (source_rates[i] < sink_rates[j]) {
1389 ++i;
1390 } else {
1391 ++j;
1392 }
1393 }
1394 return k;
1395}
1396
94ca719e
VS
/*
 * Fill common_rates with the link rates supported by both this source
 * and the attached sink; returns the number of common rates.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1410
0336400e
VS
/*
 * Render an int array into str as a comma-separated list, e.g.
 * "162000, 270000". Output is always NUL-terminated; entries that do
 * not fit are silently dropped.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int idx;

	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		/* prefix every entry but the first with ", " */
		int written = snprintf(str, len, "%s%d",
				       idx ? ", " : "", array[idx]);
		if (written >= len)
			return; /* truncated: stop appending */
		str += written;
		len -= written;
	}
}
1426
/*
 * Debug helper: log the source, sink and common link-rate tables.
 * Cheap early-out when KMS debugging is disabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1449
f4896f15 1450static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1451{
1452 int i = 0;
1453
1454 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1455 if (find == rates[i])
1456 break;
1457
1458 return i;
1459}
1460
50fec21a
VS
/*
 * Highest link rate supported by both source and sink. The common
 * rates array is zero-initialized, so rate_to_index(0, rates) gives
 * the count of valid entries and rates[count - 1] the maximum.
 * Falls back to RBR (162000) if, unexpectedly, there is no overlap.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
1473
ed4e9c1d
VS
/* Index of 'rate' in the sink's advertised rate table, for programming
 * DP_LINK_RATE_SET on eDP 1.4 sinks. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1478
94223d04
ACO
1479void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1480 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1481{
1482 if (intel_dp->num_sink_rates) {
1483 *link_bw = 0;
1484 *rate_select =
1485 intel_dp_rate_select(intel_dp, port_clock);
1486 } else {
1487 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1488 *rate_select = 0;
1489 }
1490}
1491
00c09d70 1492bool
5bfe2ac0 1493intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1494 struct intel_crtc_state *pipe_config)
a4fc5ed6 1495{
5bfe2ac0 1496 struct drm_device *dev = encoder->base.dev;
36008365 1497 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1498 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1499 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1500 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1501 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1502 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1503 int lane_count, clock;
56071a20 1504 int min_lane_count = 1;
eeb6324d 1505 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1506 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1507 int min_clock = 0;
a8f3ef61 1508 int max_clock;
083f9560 1509 int bpp, mode_rate;
ff9a6750 1510 int link_avail, link_clock;
94ca719e
VS
1511 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1512 int common_len;
04a60f9f 1513 uint8_t link_bw, rate_select;
a8f3ef61 1514
94ca719e 1515 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1516
1517 /* No common link rates between source and sink */
94ca719e 1518 WARN_ON(common_len <= 0);
a8f3ef61 1519
94ca719e 1520 max_clock = common_len - 1;
a4fc5ed6 1521
bc7d38a4 1522 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1523 pipe_config->has_pch_encoder = true;
1524
03afc4a2 1525 pipe_config->has_dp_encoder = true;
f769cd24 1526 pipe_config->has_drrs = false;
9fcb1704 1527 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1528
dd06f90e
JN
1529 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1530 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1531 adjusted_mode);
a1b2278e
CK
1532
1533 if (INTEL_INFO(dev)->gen >= 9) {
1534 int ret;
e435d6e5 1535 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1536 if (ret)
1537 return ret;
1538 }
1539
b5667627 1540 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1541 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1542 intel_connector->panel.fitting_mode);
1543 else
b074cec8
JB
1544 intel_pch_panel_fitting(intel_crtc, pipe_config,
1545 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1546 }
1547
cb1793ce 1548 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1549 return false;
1550
083f9560 1551 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1552 "max bw %d pixel clock %iKHz\n",
94ca719e 1553 max_lane_count, common_rates[max_clock],
241bfc38 1554 adjusted_mode->crtc_clock);
083f9560 1555
36008365
DV
1556 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1557 * bpc in between. */
3e7ca985 1558 bpp = pipe_config->pipe_bpp;
56071a20 1559 if (is_edp(intel_dp)) {
22ce5628
TS
1560
1561 /* Get bpp from vbt only for panels that dont have bpp in edid */
1562 if (intel_connector->base.display_info.bpc == 0 &&
1563 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1564 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1565 dev_priv->vbt.edp_bpp);
1566 bpp = dev_priv->vbt.edp_bpp;
1567 }
1568
344c5bbc
JN
1569 /*
1570 * Use the maximum clock and number of lanes the eDP panel
1571 * advertizes being capable of. The panels are generally
1572 * designed to support only a single clock and lane
1573 * configuration, and typically these values correspond to the
1574 * native resolution of the panel.
1575 */
1576 min_lane_count = max_lane_count;
1577 min_clock = max_clock;
7984211e 1578 }
657445fe 1579
36008365 1580 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1581 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1582 bpp);
36008365 1583
c6930992 1584 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1585 for (lane_count = min_lane_count;
1586 lane_count <= max_lane_count;
1587 lane_count <<= 1) {
1588
94ca719e 1589 link_clock = common_rates[clock];
36008365
DV
1590 link_avail = intel_dp_max_data_rate(link_clock,
1591 lane_count);
1592
1593 if (mode_rate <= link_avail) {
1594 goto found;
1595 }
1596 }
1597 }
1598 }
c4867936 1599
36008365 1600 return false;
3685a8f3 1601
36008365 1602found:
55bc60db
VS
1603 if (intel_dp->color_range_auto) {
1604 /*
1605 * See:
1606 * CEA-861-E - 5.1 Default Encoding Parameters
1607 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1608 */
0f2a2a75
VS
1609 pipe_config->limited_color_range =
1610 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1611 } else {
1612 pipe_config->limited_color_range =
1613 intel_dp->limited_color_range;
55bc60db
VS
1614 }
1615
90a6b7b0 1616 pipe_config->lane_count = lane_count;
a8f3ef61 1617
657445fe 1618 pipe_config->pipe_bpp = bpp;
94ca719e 1619 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1620
04a60f9f
VS
1621 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1622 &link_bw, &rate_select);
1623
1624 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1625 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1626 pipe_config->port_clock, bpp);
36008365
DV
1627 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1628 mode_rate, link_avail);
a4fc5ed6 1629
03afc4a2 1630 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1631 adjusted_mode->crtc_clock,
1632 pipe_config->port_clock,
03afc4a2 1633 &pipe_config->dp_m_n);
9d1a455b 1634
439d7ac0 1635 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1636 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1637 pipe_config->has_drrs = true;
439d7ac0
PB
1638 intel_link_compute_m_n(bpp, lane_count,
1639 intel_connector->panel.downclock_mode->clock,
1640 pipe_config->port_clock,
1641 &pipe_config->dp_m2_n2);
1642 }
1643
ef11bdb3 1644 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1645 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1646 else if (IS_BROXTON(dev))
1647 /* handled in ddi */;
5416d871 1648 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1649 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1650 else
840b32b7 1651 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1652
03afc4a2 1653 return true;
a4fc5ed6
KP
1654}
1655
901c2daf
VS
/* Latch the computed link rate and lane count into the intel_dp so
 * link training and retraining use the values this modeset chose. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1662
/*
 * Assemble the DP port register value (intel_dp->DP) for the upcoming
 * enable, handling the per-platform register layout differences. The
 * value is only cached here; the actual write happens later in the
 * enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select live here */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: enhanced framing is controlled via TRANS_DP_CTL */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1744
ffd6749d
PZ
/* Mask/value pairs describing the panel power sequencer states we wait
 * for: fully on (idle), fully off, and "power cycle complete". */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the panel power status register until (status & mask) == value,
 * giving up (with an error, but continuing) after 5 seconds. Must be
 * called with the pps_mutex held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms total timeout, polling every 10 ms */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 1780
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1786
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1792
/* Block until the panel's mandated power cycle delay has elapsed and
 * the power sequencer has settled, so the panel may be powered up again. */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1804
/* Honor the panel's power-on -> backlight-on delay before enabling
 * the backlight. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1810
/* Honor the panel's backlight-off delay before cutting panel power. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 1816
832dd3c1
KP
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	/* caller holds the PPS lock; we only read/modify the cached value */
	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* BXT has no write-protect key field in this register */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1836
951468f3
VS
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Force the panel's VDD rail on so the AUX channel works without a
 * full panel power-up. Returns true when this call actually turned
 * VDD on (and the caller therefore owes a matching off), false when
 * it was already on or the port is not eDP.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* a pending delayed vdd-off must not race with our "on" request */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* respect the panel's power-cycle delay before re-powering */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1894
951468f3
VS
1895/*
1896 * Must be paired with intel_edp_panel_vdd_off() or
1897 * intel_edp_panel_off().
1898 * Nested calls to these functions are not allowed since
1899 * we drop the lock. Caller must use some higher level
1900 * locking to prevent nested calls from other threads.
1901 */
b80d6c78 1902void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1903{
c695b6b6 1904 bool vdd;
adddaaf4 1905
c695b6b6
VS
1906 if (!is_edp(intel_dp))
1907 return;
1908
773538e8 1909 pps_lock(intel_dp);
c695b6b6 1910 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1911 pps_unlock(intel_dp);
c695b6b6 1912
e2c719b7 1913 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1914 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1915}
1916
4be73780 1917static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1918{
30add22d 1919 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1920 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1921 struct intel_digital_port *intel_dig_port =
1922 dp_to_dig_port(intel_dp);
1923 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1924 enum intel_display_power_domain power_domain;
5d613501 1925 u32 pp;
f0f59a00 1926 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1927
e39b999a 1928 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1929
15e899a0 1930 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1931
15e899a0 1932 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1933 return;
b0665d57 1934
3936fcf4
VS
1935 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1936 port_name(intel_dig_port->port));
bd943159 1937
be2c9196
VS
1938 pp = ironlake_get_pp_control(intel_dp);
1939 pp &= ~EDP_FORCE_VDD;
453c5420 1940
be2c9196
VS
1941 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1942 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1943
be2c9196
VS
1944 I915_WRITE(pp_ctrl_reg, pp);
1945 POSTING_READ(pp_ctrl_reg);
90791a5c 1946
be2c9196
VS
1947 /* Make sure sequencer is idle before allowing subsequent activity */
1948 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1949 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1950
be2c9196
VS
1951 if ((pp & POWER_TARGET_ON) == 0)
1952 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1953
25f78f58 1954 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1955 intel_display_power_put(dev_priv, power_domain);
bd943159 1956}
5d613501 1957
4be73780 1958static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1959{
1960 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1961 struct intel_dp, panel_vdd_work);
bd943159 1962
773538e8 1963 pps_lock(intel_dp);
15e899a0
VS
1964 if (!intel_dp->want_panel_vdd)
1965 edp_panel_vdd_off_sync(intel_dp);
773538e8 1966 pps_unlock(intel_dp);
bd943159
KP
1967}
1968
aba86890
ID
1969static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1970{
1971 unsigned long delay;
1972
1973 /*
1974 * Queue the timer to fire a long time from now (relative to the power
1975 * down delay) to keep the panel power up across a sequence of
1976 * operations.
1977 */
1978 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1979 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1980}
1981
951468f3
VS
1982/*
1983 * Must be paired with edp_panel_vdd_on().
1984 * Must hold pps_mutex around the whole on/off sequence.
1985 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1986 */
4be73780 1987static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1988{
e39b999a
VS
1989 struct drm_i915_private *dev_priv =
1990 intel_dp_to_dev(intel_dp)->dev_private;
1991
1992 lockdep_assert_held(&dev_priv->pps_mutex);
1993
97af61f5
KP
1994 if (!is_edp(intel_dp))
1995 return;
5d613501 1996
e2c719b7 1997 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1998 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1999
bd943159
KP
2000 intel_dp->want_panel_vdd = false;
2001
aba86890 2002 if (sync)
4be73780 2003 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2004 else
2005 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2006}
2007
/*
 * Power up the eDP panel through the panel power sequencer.
 * Caller holds pps_mutex. No-op for non-eDP; warns if already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the panel's minimum off time before powering back up. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Block until the sequencer reports the panel fully on. */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 2055
/* Locked public wrapper around edp_panel_on(); ignores non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2065
/*
 * Power down the eDP panel. Caller holds pps_mutex and must currently
 * hold a VDD request (the panel-off sequence needs forced VDD).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD goes down together with panel power in the write below. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Start the power-cycle delay clock, then wait for full panel off. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
e39b999a 2108
/* Locked public wrapper around edp_panel_off(); ignores non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2118
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2148
1250d107
JN
2149/* Enable backlight PWM and backlight PP control. */
2150void intel_edp_backlight_on(struct intel_dp *intel_dp)
2151{
2152 if (!is_edp(intel_dp))
2153 return;
2154
2155 DRM_DEBUG_KMS("\n");
2156
2157 intel_panel_enable_backlight(intel_dp->attached_connector);
2158 _intel_edp_backlight_on(intel_dp);
2159}
2160
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time and honour the panel's backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2187
1250d107
JN
2188/* Disable backlight PP control and backlight PWM. */
2189void intel_edp_backlight_off(struct intel_dp *intel_dp)
2190{
2191 if (!is_edp(intel_dp))
2192 return;
2193
2194 DRM_DEBUG_KMS("\n");
f7d2323c 2195
1250d107 2196 _intel_edp_backlight_off(intel_dp);
f7d2323c 2197 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2198}
a4fc5ed6 2199
73580fb7
JN
2200/*
2201 * Hook for controlling the panel power control backlight through the bl_power
2202 * sysfs attribute. Take care to handle multiple calls.
2203 */
2204static void intel_edp_backlight_power(struct intel_connector *connector,
2205 bool enable)
2206{
2207 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2208 bool is_enabled;
2209
773538e8 2210 pps_lock(intel_dp);
e39b999a 2211 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2212 pps_unlock(intel_dp);
73580fb7
JN
2213
2214 if (is_enabled == enable)
2215 return;
2216
23ba9373
JN
2217 DRM_DEBUG_KMS("panel power control backlight %s\n",
2218 enable ? "enable" : "disable");
73580fb7
JN
2219
2220 if (enable)
2221 _intel_edp_backlight_on(intel_dp);
2222 else
2223 _intel_edp_backlight_off(intel_dp);
2224}
2225
/* Human-readable form of an on/off state for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
2230
/*
 * Assert that the DP port enable bit matches the expected @state;
 * emits an I915_STATE_WARN on mismatch.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			state_string(state), state_string(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2243
/*
 * Assert that the eDP PLL enable bit in DP_A matches the expected @state;
 * emits an I915_STATE_WARN on mismatch.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			state_string(state), state_string(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2254
/*
 * Enable the eDP PLL for port A. Pipe, port and PLL must all be off on
 * entry; frequency is programmed before the enable bit is set, with fixed
 * settle delays after each write.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	/* Only two link rates exist here: 1.62 GHz, else 2.7 GHz. */
	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	/* Program the frequency first, then allow it to settle. */
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2285
/*
 * Disable the eDP PLL for port A. Pipe and port must already be off,
 * and the PLL must currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Settle time after dropping the enable bit. */
	udelay(200);
}
2304
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Any non-ON DPMS mode maps to sink power state D3. */
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			/* drm_dp_dpcd_writeb() returns 1 on success. */
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2335
/*
 * Read the hardware to determine whether this DP encoder is enabled and,
 * if so, which pipe it is driving. Returns false when the power domain
 * is off or the port is disabled.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Register reads are only valid with the power domain enabled. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/*
		 * On CPT the pipe<->port routing lives in the transcoder
		 * registers, so scan each pipe for a match.
		 */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
d240f20f 2378
/*
 * Read back the current DP state from hardware into @pipe_config:
 * sync polarity flags, audio, lane count, color range, link M/N values
 * and the resulting port/dot clocks.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarities live in the transcoder register. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	/* Hardware encodes lane count as (lanes - 1). */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2466
/*
 * Encoder disable hook: tear down audio/PSR, power the panel down in the
 * required order, and on pre-ilk hardware disable the port before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2490
08aff3fe 2491static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2492{
2bd2ad64 2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2494 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2495
49277c31 2496 intel_dp_link_down(intel_dp);
abfce949
VS
2497
2498 /* Only ilk+ has port A */
08aff3fe
VS
2499 if (port == PORT_A)
2500 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2501}
2502
2503static void vlv_post_disable_dp(struct intel_encoder *encoder)
2504{
2505 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2506
2507 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2508}
2509
/*
 * Assert (@reset == true) or deassert the CHV DPIO data-lane soft reset
 * for this encoder's channel. The second PCS pair is only touched when
 * more than two lanes are in use. Caller holds sb_lock (DPIO access).
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane reset bits for the first PCS pair (lanes 0/1). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Clock soft reset for the first PCS pair. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
97fd4d5c 2553
/*
 * CHV post-disable: take the link down, then hold the DPIO data lanes
 * in soft reset under sb_lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2569
/*
 * Program the requested link-training pattern. On DDI platforms this
 * writes DP_TP_CTL directly; otherwise it only folds the pattern bits
 * into *DP — the caller is responsible for writing the port register.
 *
 * @DP:           in/out copy of the port register value to update
 * @dp_train_pat: DP_TRAINING_PATTERN_* plus optional scrambling flag
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style encoding; pattern 3 is not available here. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy/VLV/CHV encoding; only CHV supports pattern 3. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2653
/*
 * Enable the DP port itself (not the pipe), seeding training pattern 1.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2681
/*
 * Common enable path: power up the port and panel, run link training,
 * and start audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	/* The port must still be disabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* Power the panel on under a temporary VDD request. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink, then train the link. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
89b667f8 2739
ecff4f3b
JN
2740static void g4x_enable_dp(struct intel_encoder *encoder)
2741{
828f5c6e
JN
2742 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2743
ecff4f3b 2744 intel_enable_dp(encoder);
4be73780 2745 intel_edp_backlight_on(intel_dp);
ab1f90f9 2746}
89b667f8 2747
ab1f90f9
JN
2748static void vlv_enable_dp(struct intel_encoder *encoder)
2749{
828f5c6e
JN
2750 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2751
4be73780 2752 intel_edp_backlight_on(intel_dp);
b32c6f48 2753 intel_psr_enable(intel_dp);
d240f20f
JB
2754}
2755
/*
 * g4x/ilk pre-enable hook: prepare the port registers and, for ilk
 * port A, enable the eDP PLL with FIFO underrun reporting suppressed.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
2782
/*
 * Logically disconnect the panel power sequencer currently assigned to
 * this port: sync off any pending VDD, clear the port select in the PPS
 * registers, and mark pps_pipe invalid. Callers hold pps_mutex (both
 * call sites assert it via lockdep_assert_held).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2808
/*
 * Walk all eDP encoders and detach the given pipe's power sequencer from
 * whichever port currently owns it, so the caller can claim it.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have power sequencers on pipes A and B */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP ports own a power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* stealing from a port with an active crtc is a driver bug */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2845
/*
 * Bind the power sequencer of this port's pipe to the port: detach any
 * previously used sequencer, steal the target pipe's sequencer from any
 * other port, then (re)initialize the PPS state and registers.
 * Caller must hold pps_mutex. No-op for non-eDP ports.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* already using the right sequencer? nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2886
/*
 * pre_enable hook for VLV: program per-lane PCS registers through the
 * DPIO sideband, then enable the port (on VLV/CHV the port is brought
 * up here rather than in the enable hook).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read here is immediately discarded by
	 * the "val = 0" below; presumably the read itself is only kept
	 * for its sideband side effect (or is vestigial) — confirm
	 * before removing.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2915
/*
 * pre_pll_enable hook for VLV: prepare the port registers and put the
 * TX lanes into a known reset state via the DPIO sideband before the
 * DPLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2945
/*
 * pre_enable hook for CHV: program per-lane latency and stagger settings
 * through the DPIO sideband, release the data-lane soft reset, enable
 * the port, and finally drop the temporary CL2 power override taken in
 * chv_dp_pre_pll_enable().
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* lanes 2/3 live in the PCS23 group; only touch it when in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming: pick strap by port clock (kHz) */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3034
/*
 * pre_pll_enable hook for CHV: power up the PHY lanes, assert the data
 * lane soft reset, and program clock distribution / clock channel usage
 * in the common lanes before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 * (the override is dropped again in chv_pre_enable_dp)
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3117
/*
 * post_pll_disable hook for CHV: undo the clock distribution set up in
 * chv_dp_pre_pll_enable() and drop the lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_phy_powergate_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3150
a4fc5ed6 3151/*
df0c237d
JB
3152 * Native read with retry for link status and receiver capability reads for
3153 * cases where the sink may still be asleep.
9d1a1031
JN
3154 *
3155 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3156 * supposed to retry 3 times per the spec.
a4fc5ed6 3157 */
9d1a1031
JN
3158static ssize_t
3159intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3160 void *buffer, size_t size)
a4fc5ed6 3161{
9d1a1031
JN
3162 ssize_t ret;
3163 int i;
61da5fab 3164
f6a19066
VS
3165 /*
3166 * Sometime we just get the same incorrect byte repeated
3167 * over the entire buffer. Doing just one throw away read
3168 * initially seems to "solve" it.
3169 */
3170 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3171
61da5fab 3172 for (i = 0; i < 3; i++) {
9d1a1031
JN
3173 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3174 if (ret == size)
3175 return ret;
61da5fab
JB
3176 msleep(1);
3177 }
a4fc5ed6 3178
9d1a1031 3179 return ret;
a4fc5ed6
KP
3180}
3181
3182/*
3183 * Fetch AUX CH registers 0x202 - 0x207 which contain
3184 * link status information
3185 */
94223d04 3186bool
93f62dad 3187intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3188{
9d1a1031
JN
3189 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3190 DP_LANE0_1_STATUS,
3191 link_status,
3192 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3193}
3194
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level the source supports for this
 * port/platform, as a DP_TRAIN_VOLTAGE_SWING_LEVEL_* value. The branch
 * order matters: BXT is checked before the generic gen >= 9 test.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* level 3 only allowed on eDP (port A) with low-vswing VBT */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3218
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, as a DP_TRAIN_PRE_EMPH_LEVEL_* value. Higher
 * swing generally leaves less headroom for pre-emphasis.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3286
/*
 * Translate the requested voltage swing / pre-emphasis (train_set[0])
 * into VLV PHY register values and program them via the DPIO sideband.
 * Returns 0 always (VLV does not fold levels into the DP register value);
 * unsupported swing/pre-emphasis combinations are silently skipped.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* pick the register values for the requested pre-emphasis/swing */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3386
67fa24b4
VS
3387static bool chv_need_uniq_trans_scale(uint8_t train_set)
3388{
3389 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3390 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3391}
3392
/*
 * Translate the requested voltage swing / pre-emphasis (train_set[0])
 * into CHV PHY de-emphasis and margin values and program them per lane
 * via the DPIO sideband, then trigger the swing calculation.
 * Returns 0 always; unsupported combinations are silently skipped.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* table lookup: (pre-emphasis, swing) -> (deemph, margin) */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* lanes 2/3 live in the PCS23 group; only touch it when in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3560
a4fc5ed6 3561static uint32_t
5829975c 3562gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3563{
3cf2efb1 3564 uint32_t signal_levels = 0;
a4fc5ed6 3565
3cf2efb1 3566 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3567 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3568 default:
3569 signal_levels |= DP_VOLTAGE_0_4;
3570 break;
bd60018a 3571 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3572 signal_levels |= DP_VOLTAGE_0_6;
3573 break;
bd60018a 3574 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3575 signal_levels |= DP_VOLTAGE_0_8;
3576 break;
bd60018a 3577 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3578 signal_levels |= DP_VOLTAGE_1_2;
3579 break;
3580 }
3cf2efb1 3581 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3582 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3583 default:
3584 signal_levels |= DP_PRE_EMPHASIS_0;
3585 break;
bd60018a 3586 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3587 signal_levels |= DP_PRE_EMPHASIS_3_5;
3588 break;
bd60018a 3589 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3590 signal_levels |= DP_PRE_EMPHASIS_6;
3591 break;
bd60018a 3592 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3593 signal_levels |= DP_PRE_EMPHASIS_9_5;
3594 break;
3595 }
3596 return signal_levels;
3597}
3598
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map the combined swing+pre-emphasis bits to the SNB eDP register
 * encoding. Several swing/pre-emphasis combinations share one hardware
 * setting, hence the grouped cases; unsupported combinations fall back
 * to the lowest setting with a debug message.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3626
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	/*
	 * Map the DPCD training request onto the IVB eDP register encoding.
	 * Unlike SNB, each supported swing/pre-emphasis pair has its own
	 * dedicated register value.
	 */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Out-of-spec request: log it and use a safe middle setting. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3657
/*
 * Program the voltage swing / pre-emphasis drive levels requested by the
 * sink (intel_dp->train_set[0]) into the port register.  The encoding and
 * the register bits affected (mask) are platform-specific; the selected
 * bits are merged into the cached intel_dp->DP value and written out.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* On BXT the levels are programmed elsewhere; nothing to merge
		 * into the DDI_BUF register here. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/* CHV/VLV program the PHY directly; mask stays 0. */
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		/* PORT_A on gen6/7 is eDP with its own encoding. */
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Merge the new levels into the cached port register value. */
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	/* Posting read flushes the write before training continues. */
	POSTING_READ(intel_dp->output_reg);
}
3704
/*
 * Select the link-training pattern (or disable training) on the source
 * side.  The platform-specific helper updates the cached intel_dp->DP
 * value and/or DPCD as needed; the result is then written to the port
 * register and flushed with a posting read.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3718
/*
 * Switch a DDI port to idle link-training mode and, except on PORT_A,
 * wait for the hardware to report that the idle pattern has been sent.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Replace the current training pattern with the idle pattern. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3749
/*
 * Shut the DP link down on pre-DDI hardware: put the port into the idle
 * training pattern, then disable the port and audio output.  Includes the
 * IBX transcoder-A workaround.  The write ordering below is deliberate;
 * do not reorder the register accesses.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms tear the link down through a different path. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Port already disabled: nothing to do. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Step 1: drop into the idle training pattern (encoding differs
	 * between CPT/gen7-PORT_A, CHV, and the rest). */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Step 2: disable the port and audio. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Let the workaround settle, then re-enable underrun reporting. */
		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Respect the panel's power-down delay before anything touches it again. */
	msleep(intel_dp->panel_power_down_delay);

	/* Keep the cached register value in sync with the hardware. */
	intel_dp->DP = DP;
}
3818
/*
 * Read and cache the sink's DPCD receiver capabilities, plus eDP-specific
 * extras (PSR/PSR2 support, intermediate link rates) and downstream port
 * info for branch devices.
 *
 * Returns false if the AUX transfer fails, no DPCD is present, or the
 * downstream port info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 (gen9+) additionally requires AUX frame sync support. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated if shorter than max. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3908
0d198328
AJ
3909static void
3910intel_dp_probe_oui(struct intel_dp *intel_dp)
3911{
3912 u8 buf[3];
3913
3914 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3915 return;
3916
9d1a1031 3917 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3918 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3919 buf[0], buf[1], buf[2]);
3920
9d1a1031 3921 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3922 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3923 buf[0], buf[1], buf[2]);
3924}
3925
0e32b39c
DA
3926static bool
3927intel_dp_probe_mst(struct intel_dp *intel_dp)
3928{
3929 u8 buf[1];
3930
3931 if (!intel_dp->can_mst)
3932 return false;
3933
3934 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3935 return false;
3936
0e32b39c
DA
3937 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3938 if (buf[0] & DP_MST_CAP) {
3939 DRM_DEBUG_KMS("Sink is MST capable\n");
3940 intel_dp->is_mst = true;
3941 } else {
3942 DRM_DEBUG_KMS("Sink is not MST capable\n");
3943 intel_dp->is_mst = false;
3944 }
3945 }
0e32b39c
DA
3946
3947 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3948 return intel_dp->is_mst;
3949}
3950
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START and wait (up to 10
 * vblanks) for the sink's CRC test counter to drain to zero.  Always
 * re-enables IPS (disabled by intel_dp_sink_crc_start()) on exit.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never reaches zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write DP_TEST_SINK to clear only the START bit. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink's CRC count drains to zero. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
		ret = -ETIMEDOUT;
	}

 out:
	/* Undo the IPS disable done when CRC capture was started. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
3994
/*
 * Start sink CRC calculation: verify the sink supports CRC, stop any
 * capture already in progress, disable IPS (which would perturb the CRC),
 * and set DP_TEST_SINK_START.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, -EIO on
 * AUX failure, or the error from stopping a previous capture.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A capture left running from before must be stopped first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	/* IPS would alter the image and hence the CRC; turn it off. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink one frame before the caller starts polling. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4029
/*
 * Capture one sink CRC: start the capture, wait (up to 6 vblanks) for the
 * sink to report a nonzero CRC count, then read the 6 CRC bytes into @crc.
 * The capture is always stopped before returning.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if no CRC was
 * produced, or an error from intel_dp_sink_crc_start().
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	/* 6 bytes: R/Cr, G/Y, B/Cb CRCs, 16 bits each. */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4070
a60f0e38
JB
4071static bool
4072intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4073{
9d1a1031
JN
4074 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4075 DP_DEVICE_SERVICE_IRQ_VECTOR,
4076 sink_irq_vector, 1) == 1;
a60f0e38
JB
4077}
4078
0e32b39c
DA
4079static bool
4080intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4081{
4082 int ret;
4083
4084 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4085 DP_SINK_COUNT_ESI,
4086 sink_irq_vector, 14);
4087 if (ret != 14)
4088 return false;
4089
4090 return true;
4091}
4092
c5d5ab7a
TP
4093static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4094{
4095 uint8_t test_result = DP_TEST_ACK;
4096 return test_result;
4097}
4098
4099static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4100{
4101 uint8_t test_result = DP_TEST_NAK;
4102 return test_result;
4103}
4104
4105static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4106{
c5d5ab7a 4107 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4108 struct intel_connector *intel_connector = intel_dp->attached_connector;
4109 struct drm_connector *connector = &intel_connector->base;
4110
4111 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4112 connector->edid_corrupt ||
559be30c
TP
4113 intel_dp->aux.i2c_defer_count > 6) {
4114 /* Check EDID read for NACKs, DEFERs and corruption
4115 * (DP CTS 1.2 Core r1.1)
4116 * 4.2.2.4 : Failed EDID read, I2C_NAK
4117 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4118 * 4.2.2.6 : EDID corruption detected
4119 * Use failsafe mode for all cases
4120 */
4121 if (intel_dp->aux.i2c_nack_count > 0 ||
4122 intel_dp->aux.i2c_defer_count > 0)
4123 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4124 intel_dp->aux.i2c_nack_count,
4125 intel_dp->aux.i2c_defer_count);
4126 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4127 } else {
f79b468e
TS
4128 struct edid *block = intel_connector->detect_edid;
4129
4130 /* We have to write the checksum
4131 * of the last block read
4132 */
4133 block += intel_connector->detect_edid->extensions;
4134
559be30c
TP
4135 if (!drm_dp_dpcd_write(&intel_dp->aux,
4136 DP_TEST_EDID_CHECKSUM,
f79b468e 4137 &block->checksum,
5a1cc655 4138 1))
559be30c
TP
4139 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4140
4141 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4142 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4143 }
4144
4145 /* Set test active flag here so userspace doesn't interrupt things */
4146 intel_dp->compliance_test_active = 1;
4147
c5d5ab7a
TP
4148 return test_result;
4149}
4150
4151static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4152{
c5d5ab7a
TP
4153 uint8_t test_result = DP_TEST_NAK;
4154 return test_result;
4155}
4156
/*
 * Service a DP compliance automated-test request: read DP_TEST_REQUEST,
 * dispatch to the matching autotest handler, and write the handler's
 * ACK/NAK back to DP_TEST_RESPONSE.  Unknown requests are NAKed.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		/* Still report NAK so the sink isn't left waiting. */
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4202
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ was lost, let the MST topology manager handle the
 * event, and ack the handled ESI bits back to the sink.  Loops (via
 * go_again) while the sink keeps raising events.  If the ESI read fails,
 * MST is torn down and a hotplug event is sent.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / after an ESI read failure.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the AUX
				 * write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may be pending; re-read and loop. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4259
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection mutex (we touch link state). */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to check if the encoder isn't driving an active pipe. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
a4fc5ed6 4325
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from the DPCD.  For native sinks a successful
 * DPCD read means connected; for branch devices the downstream port info
 * (HPD awareness, SINK_COUNT, DDC probe, port type) is consulted to pick
 * connected / disconnected / unknown.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: detailed per-port type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type exists. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4375
d410b56d
CW
4376static enum drm_connector_status
4377edp_detect(struct intel_dp *intel_dp)
4378{
4379 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4380 enum drm_connector_status status;
4381
4382 status = intel_panel_detect(dev);
4383 if (status == connector_status_unknown)
4384 status = connector_status_connected;
4385
4386 return status;
4387}
4388
/*
 * Live HPD status for IBX PCH ports, read from SDEISR.  PORT_A has no
 * hotplug line and is always reported connected.
 */
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4413
/*
 * Live HPD status for CPT+ PCH ports, read from SDEISR (CPT bit layout).
 * PORT_A has no hotplug line and is always reported connected; PORT_E
 * uses the SPT-specific bit.
 */
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	case PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4441
7e66bcf2 4442static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4443 struct intel_digital_port *port)
a4fc5ed6 4444{
9642c81c 4445 u32 bit;
5eb08b69 4446
9642c81c
JN
4447 switch (port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 default:
4458 MISSING_CASE(port->port);
4459 return false;
4460 }
4461
4462 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4463}
4464
4465static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4466 struct intel_digital_port *port)
4467{
4468 u32 bit;
4469
4470 switch (port->port) {
4471 case PORT_B:
4472 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4473 break;
4474 case PORT_C:
4475 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4476 break;
4477 case PORT_D:
4478 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4479 break;
4480 default:
4481 MISSING_CASE(port->port);
4482 return false;
a4fc5ed6
KP
4483 }
4484
1d245987 4485 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4486}
4487
/*
 * Live HPD status for Broxton, read from GEN8_DE_PORT_ISR.  The port is
 * derived from the encoder's HPD pin rather than intel_dig_port->port.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	/* Map the encoder's HPD pin back to a port. */
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4513
7e66bcf2
JN
4514/*
4515 * intel_digital_port_connected - is the specified port connected?
4516 * @dev_priv: i915 private structure
4517 * @port: the port to test
4518 *
4519 * Return %true if @port is connected, %false otherwise.
4520 */
237ed86c 4521bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4522 struct intel_digital_port *port)
4523{
0df53b77 4524 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4525 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4526 if (HAS_PCH_SPLIT(dev_priv))
4527 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4528 else if (IS_BROXTON(dev_priv))
4529 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4530 else if (IS_VALLEYVIEW(dev_priv))
4531 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4532 else
4533 return g4x_digital_port_connected(dev_priv, port);
4534}
4535
b93433cc
JN
4536static enum drm_connector_status
4537ironlake_dp_detect(struct intel_dp *intel_dp)
4538{
4539 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4540 struct drm_i915_private *dev_priv = dev->dev_private;
4541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4542
7e66bcf2 4543 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4544 return connector_status_disconnected;
4545
4546 return intel_dp_detect_dpcd(intel_dp);
4547}
4548
2a592bec
DA
4549static enum drm_connector_status
4550g4x_dp_detect(struct intel_dp *intel_dp)
4551{
4552 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4553 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec 4554
7e66bcf2 4555 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4556 return connector_status_disconnected;
4557
26d61aad 4558 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4559}
4560
8c241fef 4561static struct edid *
beb60608 4562intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4563{
beb60608 4564 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4565
9cd300e0
JN
4566 /* use cached edid if we have one */
4567 if (intel_connector->edid) {
9cd300e0
JN
4568 /* invalid edid */
4569 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4570 return NULL;
4571
55e9edeb 4572 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4573 } else
4574 return drm_get_edid(&intel_connector->base,
4575 &intel_dp->aux.ddc);
4576}
8c241fef 4577
beb60608
CW
4578static void
4579intel_dp_set_edid(struct intel_dp *intel_dp)
4580{
4581 struct intel_connector *intel_connector = intel_dp->attached_connector;
4582 struct edid *edid;
8c241fef 4583
beb60608
CW
4584 edid = intel_dp_get_edid(intel_dp);
4585 intel_connector->detect_edid = edid;
4586
4587 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4588 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4589 else
4590 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4591}
4592
beb60608
CW
4593static void
4594intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4595{
beb60608 4596 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4597
beb60608
CW
4598 kfree(intel_connector->detect_edid);
4599 intel_connector->detect_edid = NULL;
9cd300e0 4600
beb60608
CW
4601 intel_dp->has_audio = false;
4602}
d6f24d0f 4603
a9756bb5
ZW
4604static enum drm_connector_status
4605intel_dp_detect(struct drm_connector *connector, bool force)
4606{
4607 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4608 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4609 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4610 struct drm_device *dev = connector->dev;
a9756bb5 4611 enum drm_connector_status status;
671dedd2 4612 enum intel_display_power_domain power_domain;
0e32b39c 4613 bool ret;
09b1eb13 4614 u8 sink_irq_vector;
a9756bb5 4615
164c8598 4616 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4617 connector->base.id, connector->name);
beb60608 4618 intel_dp_unset_edid(intel_dp);
164c8598 4619
0e32b39c
DA
4620 if (intel_dp->is_mst) {
4621 /* MST devices are disconnected from a monitor POV */
4622 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4623 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4624 return connector_status_disconnected;
0e32b39c
DA
4625 }
4626
25f78f58
VS
4627 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4628 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4629
d410b56d
CW
4630 /* Can't disconnect eDP, but you can close the lid... */
4631 if (is_edp(intel_dp))
4632 status = edp_detect(intel_dp);
4633 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4634 status = ironlake_dp_detect(intel_dp);
4635 else
4636 status = g4x_dp_detect(intel_dp);
4df6960e
SS
4637 if (status != connector_status_connected) {
4638 intel_dp->compliance_test_active = 0;
4639 intel_dp->compliance_test_type = 0;
4640 intel_dp->compliance_test_data = 0;
4641
c8c8fb33 4642 goto out;
4df6960e 4643 }
a9756bb5 4644
0d198328
AJ
4645 intel_dp_probe_oui(intel_dp);
4646
0e32b39c
DA
4647 ret = intel_dp_probe_mst(intel_dp);
4648 if (ret) {
4649 /* if we are in MST mode then this connector
4650 won't appear connected or have anything with EDID on it */
4651 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4652 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4653 status = connector_status_disconnected;
4654 goto out;
4655 }
4656
4df6960e
SS
4657 /*
4658 * Clearing NACK and defer counts to get their exact values
4659 * while reading EDID which are required by Compliance tests
4660 * 4.2.2.4 and 4.2.2.5
4661 */
4662 intel_dp->aux.i2c_nack_count = 0;
4663 intel_dp->aux.i2c_defer_count = 0;
4664
beb60608 4665 intel_dp_set_edid(intel_dp);
a9756bb5 4666
d63885da
PZ
4667 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4668 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4669 status = connector_status_connected;
4670
09b1eb13
TP
4671 /* Try to read the source of the interrupt */
4672 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4673 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4674 /* Clear interrupt source */
4675 drm_dp_dpcd_writeb(&intel_dp->aux,
4676 DP_DEVICE_SERVICE_IRQ_VECTOR,
4677 sink_irq_vector);
4678
4679 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4680 intel_dp_handle_test_request(intel_dp);
4681 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4682 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4683 }
4684
c8c8fb33 4685out:
25f78f58 4686 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4687 return status;
a4fc5ed6
KP
4688}
4689
beb60608
CW
4690static void
4691intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4692{
df0e9248 4693 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4694 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4695 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4696 enum intel_display_power_domain power_domain;
a4fc5ed6 4697
beb60608
CW
4698 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4699 connector->base.id, connector->name);
4700 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4701
beb60608
CW
4702 if (connector->status != connector_status_connected)
4703 return;
671dedd2 4704
25f78f58
VS
4705 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4706 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4707
4708 intel_dp_set_edid(intel_dp);
4709
25f78f58 4710 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4711
4712 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4713 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4714}
4715
4716static int intel_dp_get_modes(struct drm_connector *connector)
4717{
4718 struct intel_connector *intel_connector = to_intel_connector(connector);
4719 struct edid *edid;
4720
4721 edid = intel_connector->detect_edid;
4722 if (edid) {
4723 int ret = intel_connector_update_modes(connector, edid);
4724 if (ret)
4725 return ret;
4726 }
32f9d658 4727
f8779fda 4728 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4729 if (is_edp(intel_attached_dp(connector)) &&
4730 intel_connector->panel.fixed_mode) {
f8779fda 4731 struct drm_display_mode *mode;
beb60608
CW
4732
4733 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4734 intel_connector->panel.fixed_mode);
f8779fda 4735 if (mode) {
32f9d658
ZW
4736 drm_mode_probed_add(connector, mode);
4737 return 1;
4738 }
4739 }
beb60608 4740
32f9d658 4741 return 0;
a4fc5ed6
KP
4742}
4743
1aad7ac0
CW
4744static bool
4745intel_dp_detect_audio(struct drm_connector *connector)
4746{
1aad7ac0 4747 bool has_audio = false;
beb60608 4748 struct edid *edid;
1aad7ac0 4749
beb60608
CW
4750 edid = to_intel_connector(connector)->detect_edid;
4751 if (edid)
1aad7ac0 4752 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4753
1aad7ac0
CW
4754 return has_audio;
4755}
4756
f684960e
CW
4757static int
4758intel_dp_set_property(struct drm_connector *connector,
4759 struct drm_property *property,
4760 uint64_t val)
4761{
e953fd7b 4762 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4763 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4764 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4765 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4766 int ret;
4767
662595df 4768 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4769 if (ret)
4770 return ret;
4771
3f43c48d 4772 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4773 int i = val;
4774 bool has_audio;
4775
4776 if (i == intel_dp->force_audio)
f684960e
CW
4777 return 0;
4778
1aad7ac0 4779 intel_dp->force_audio = i;
f684960e 4780
c3e5f67b 4781 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4782 has_audio = intel_dp_detect_audio(connector);
4783 else
c3e5f67b 4784 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4785
4786 if (has_audio == intel_dp->has_audio)
f684960e
CW
4787 return 0;
4788
1aad7ac0 4789 intel_dp->has_audio = has_audio;
f684960e
CW
4790 goto done;
4791 }
4792
e953fd7b 4793 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4794 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4795 bool old_range = intel_dp->limited_color_range;
ae4edb80 4796
55bc60db
VS
4797 switch (val) {
4798 case INTEL_BROADCAST_RGB_AUTO:
4799 intel_dp->color_range_auto = true;
4800 break;
4801 case INTEL_BROADCAST_RGB_FULL:
4802 intel_dp->color_range_auto = false;
0f2a2a75 4803 intel_dp->limited_color_range = false;
55bc60db
VS
4804 break;
4805 case INTEL_BROADCAST_RGB_LIMITED:
4806 intel_dp->color_range_auto = false;
0f2a2a75 4807 intel_dp->limited_color_range = true;
55bc60db
VS
4808 break;
4809 default:
4810 return -EINVAL;
4811 }
ae4edb80
DV
4812
4813 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4814 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4815 return 0;
4816
e953fd7b
CW
4817 goto done;
4818 }
4819
53b41837
YN
4820 if (is_edp(intel_dp) &&
4821 property == connector->dev->mode_config.scaling_mode_property) {
4822 if (val == DRM_MODE_SCALE_NONE) {
4823 DRM_DEBUG_KMS("no scaling not supported\n");
4824 return -EINVAL;
4825 }
4826
4827 if (intel_connector->panel.fitting_mode == val) {
4828 /* the eDP scaling property is not changed */
4829 return 0;
4830 }
4831 intel_connector->panel.fitting_mode = val;
4832
4833 goto done;
4834 }
4835
f684960e
CW
4836 return -EINVAL;
4837
4838done:
c0c36b94
CW
4839 if (intel_encoder->base.crtc)
4840 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4841
4842 return 0;
4843}
4844
a4fc5ed6 4845static void
73845adf 4846intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4847{
1d508706 4848 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4849
10e972d3 4850 kfree(intel_connector->detect_edid);
beb60608 4851
9cd300e0
JN
4852 if (!IS_ERR_OR_NULL(intel_connector->edid))
4853 kfree(intel_connector->edid);
4854
acd8db10
PZ
4855 /* Can't call is_edp() since the encoder may have been destroyed
4856 * already. */
4857 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4858 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4859
a4fc5ed6 4860 drm_connector_cleanup(connector);
55f78c43 4861 kfree(connector);
a4fc5ed6
KP
4862}
4863
00c09d70 4864void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4865{
da63a9f2
PZ
4866 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4867 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4868
a121f4e5 4869 intel_dp_aux_fini(intel_dp);
0e32b39c 4870 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4871 if (is_edp(intel_dp)) {
4872 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4873 /*
4874 * vdd might still be enabled do to the delayed vdd off.
4875 * Make sure vdd is actually turned off here.
4876 */
773538e8 4877 pps_lock(intel_dp);
4be73780 4878 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4879 pps_unlock(intel_dp);
4880
01527b31
CT
4881 if (intel_dp->edp_notifier.notifier_call) {
4882 unregister_reboot_notifier(&intel_dp->edp_notifier);
4883 intel_dp->edp_notifier.notifier_call = NULL;
4884 }
bd943159 4885 }
c8bd0e49 4886 drm_encoder_cleanup(encoder);
da63a9f2 4887 kfree(intel_dig_port);
24d05927
DV
4888}
4889
07f9cd0b
ID
4890static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4891{
4892 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4893
4894 if (!is_edp(intel_dp))
4895 return;
4896
951468f3
VS
4897 /*
4898 * vdd might still be enabled do to the delayed vdd off.
4899 * Make sure vdd is actually turned off here.
4900 */
afa4e53a 4901 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4902 pps_lock(intel_dp);
07f9cd0b 4903 edp_panel_vdd_off_sync(intel_dp);
773538e8 4904 pps_unlock(intel_dp);
07f9cd0b
ID
4905}
4906
49e6bc51
VS
4907static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4908{
4909 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4910 struct drm_device *dev = intel_dig_port->base.base.dev;
4911 struct drm_i915_private *dev_priv = dev->dev_private;
4912 enum intel_display_power_domain power_domain;
4913
4914 lockdep_assert_held(&dev_priv->pps_mutex);
4915
4916 if (!edp_have_panel_vdd(intel_dp))
4917 return;
4918
4919 /*
4920 * The VDD bit needs a power domain reference, so if the bit is
4921 * already enabled when we boot or resume, grab this reference and
4922 * schedule a vdd off, so we don't hold on to the reference
4923 * indefinitely.
4924 */
4925 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4926 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4927 intel_display_power_get(dev_priv, power_domain);
4928
4929 edp_panel_vdd_schedule_off(intel_dp);
4930}
4931
6d93c0c4
ID
4932static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4933{
49e6bc51
VS
4934 struct intel_dp *intel_dp;
4935
4936 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4937 return;
4938
4939 intel_dp = enc_to_intel_dp(encoder);
4940
4941 pps_lock(intel_dp);
4942
4943 /*
4944 * Read out the current power sequencer assignment,
4945 * in case the BIOS did something with it.
4946 */
4947 if (IS_VALLEYVIEW(encoder->dev))
4948 vlv_initial_power_sequencer_setup(intel_dp);
4949
4950 intel_edp_panel_vdd_sanitize(intel_dp);
4951
4952 pps_unlock(intel_dp);
6d93c0c4
ID
4953}
4954
a4fc5ed6 4955static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4956 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4957 .detect = intel_dp_detect,
beb60608 4958 .force = intel_dp_force,
a4fc5ed6 4959 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4960 .set_property = intel_dp_set_property,
2545e4a6 4961 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4962 .destroy = intel_dp_connector_destroy,
c6f95f27 4963 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4964 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4965};
4966
4967static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4968 .get_modes = intel_dp_get_modes,
4969 .mode_valid = intel_dp_mode_valid,
df0e9248 4970 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4971};
4972
a4fc5ed6 4973static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4974 .reset = intel_dp_encoder_reset,
24d05927 4975 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4976};
4977
b2c5c181 4978enum irqreturn
13cf5504
DA
4979intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4980{
4981 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4982 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4983 struct drm_device *dev = intel_dig_port->base.base.dev;
4984 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4985 enum intel_display_power_domain power_domain;
b2c5c181 4986 enum irqreturn ret = IRQ_NONE;
1c767b33 4987
0e32b39c
DA
4988 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4989 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4990
7a7f84cc
VS
4991 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4992 /*
4993 * vdd off can generate a long pulse on eDP which
4994 * would require vdd on to handle it, and thus we
4995 * would end up in an endless cycle of
4996 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4997 */
4998 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4999 port_name(intel_dig_port->port));
a8b3d52f 5000 return IRQ_HANDLED;
7a7f84cc
VS
5001 }
5002
26fbb774
VS
5003 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5004 port_name(intel_dig_port->port),
0e32b39c 5005 long_hpd ? "long" : "short");
13cf5504 5006
25f78f58 5007 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
5008 intel_display_power_get(dev_priv, power_domain);
5009
0e32b39c 5010 if (long_hpd) {
5fa836a9
MK
5011 /* indicate that we need to restart link training */
5012 intel_dp->train_set_valid = false;
2a592bec 5013
7e66bcf2
JN
5014 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5015 goto mst_fail;
0e32b39c
DA
5016
5017 if (!intel_dp_get_dpcd(intel_dp)) {
5018 goto mst_fail;
5019 }
5020
5021 intel_dp_probe_oui(intel_dp);
5022
d14e7b6d
VS
5023 if (!intel_dp_probe_mst(intel_dp)) {
5024 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5025 intel_dp_check_link_status(intel_dp);
5026 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5027 goto mst_fail;
d14e7b6d 5028 }
0e32b39c
DA
5029 } else {
5030 if (intel_dp->is_mst) {
1c767b33 5031 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5032 goto mst_fail;
5033 }
5034
5035 if (!intel_dp->is_mst) {
5b215bcf 5036 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5037 intel_dp_check_link_status(intel_dp);
5b215bcf 5038 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5039 }
5040 }
b2c5c181
DV
5041
5042 ret = IRQ_HANDLED;
5043
1c767b33 5044 goto put_power;
0e32b39c
DA
5045mst_fail:
5046 /* if we were in MST mode, and device is not there get out of MST mode */
5047 if (intel_dp->is_mst) {
5048 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5049 intel_dp->is_mst = false;
5050 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5051 }
1c767b33
ID
5052put_power:
5053 intel_display_power_put(dev_priv, power_domain);
5054
5055 return ret;
13cf5504
DA
5056}
5057
477ec328 5058/* check the VBT to see whether the eDP is on another port */
5d8a7752 5059bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5060{
5061 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5062 union child_device_config *p_child;
36e83a18 5063 int i;
5d8a7752 5064 static const short port_mapping[] = {
477ec328
RV
5065 [PORT_B] = DVO_PORT_DPB,
5066 [PORT_C] = DVO_PORT_DPC,
5067 [PORT_D] = DVO_PORT_DPD,
5068 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5069 };
36e83a18 5070
53ce81a7
VS
5071 /*
5072 * eDP not supported on g4x. so bail out early just
5073 * for a bit extra safety in case the VBT is bonkers.
5074 */
5075 if (INTEL_INFO(dev)->gen < 5)
5076 return false;
5077
3b32a35b
VS
5078 if (port == PORT_A)
5079 return true;
5080
41aa3448 5081 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5082 return false;
5083
41aa3448
RV
5084 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5085 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5086
5d8a7752 5087 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5088 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5089 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5090 return true;
5091 }
5092 return false;
5093}
5094
0e32b39c 5095void
f684960e
CW
5096intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5097{
53b41837
YN
5098 struct intel_connector *intel_connector = to_intel_connector(connector);
5099
3f43c48d 5100 intel_attach_force_audio_property(connector);
e953fd7b 5101 intel_attach_broadcast_rgb_property(connector);
55bc60db 5102 intel_dp->color_range_auto = true;
53b41837
YN
5103
5104 if (is_edp(intel_dp)) {
5105 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5106 drm_object_attach_property(
5107 &connector->base,
53b41837 5108 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5109 DRM_MODE_SCALE_ASPECT);
5110 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5111 }
f684960e
CW
5112}
5113
dada1a9f
ID
5114static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5115{
5116 intel_dp->last_power_cycle = jiffies;
5117 intel_dp->last_power_on = jiffies;
5118 intel_dp->last_backlight_off = jiffies;
5119}
5120
67a54566
DV
5121static void
5122intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5123 struct intel_dp *intel_dp)
67a54566
DV
5124{
5125 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5126 struct edp_power_seq cur, vbt, spec,
5127 *final = &intel_dp->pps_delays;
b0a08bec 5128 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5129 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5130
e39b999a
VS
5131 lockdep_assert_held(&dev_priv->pps_mutex);
5132
81ddbc69
VS
5133 /* already initialized? */
5134 if (final->t11_t12 != 0)
5135 return;
5136
b0a08bec
VK
5137 if (IS_BROXTON(dev)) {
5138 /*
5139 * TODO: BXT has 2 sets of PPS registers.
5140 * Correct Register for Broxton need to be identified
5141 * using VBT. hardcoding for now
5142 */
5143 pp_ctrl_reg = BXT_PP_CONTROL(0);
5144 pp_on_reg = BXT_PP_ON_DELAYS(0);
5145 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5146 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5147 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5148 pp_on_reg = PCH_PP_ON_DELAYS;
5149 pp_off_reg = PCH_PP_OFF_DELAYS;
5150 pp_div_reg = PCH_PP_DIVISOR;
5151 } else {
bf13e81b
JN
5152 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5153
5154 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5155 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5156 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5157 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5158 }
67a54566
DV
5159
5160 /* Workaround: Need to write PP_CONTROL with the unlock key as
5161 * the very first thing. */
b0a08bec 5162 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5163
453c5420
JB
5164 pp_on = I915_READ(pp_on_reg);
5165 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5166 if (!IS_BROXTON(dev)) {
5167 I915_WRITE(pp_ctrl_reg, pp_ctl);
5168 pp_div = I915_READ(pp_div_reg);
5169 }
67a54566
DV
5170
5171 /* Pull timing values out of registers */
5172 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5173 PANEL_POWER_UP_DELAY_SHIFT;
5174
5175 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5176 PANEL_LIGHT_ON_DELAY_SHIFT;
5177
5178 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5179 PANEL_LIGHT_OFF_DELAY_SHIFT;
5180
5181 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5182 PANEL_POWER_DOWN_DELAY_SHIFT;
5183
b0a08bec
VK
5184 if (IS_BROXTON(dev)) {
5185 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5186 BXT_POWER_CYCLE_DELAY_SHIFT;
5187 if (tmp > 0)
5188 cur.t11_t12 = (tmp - 1) * 1000;
5189 else
5190 cur.t11_t12 = 0;
5191 } else {
5192 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5193 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5194 }
67a54566
DV
5195
5196 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5197 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5198
41aa3448 5199 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5200
5201 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5202 * our hw here, which are all in 100usec. */
5203 spec.t1_t3 = 210 * 10;
5204 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5205 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5206 spec.t10 = 500 * 10;
5207 /* This one is special and actually in units of 100ms, but zero
5208 * based in the hw (so we need to add 100 ms). But the sw vbt
5209 * table multiplies it with 1000 to make it in units of 100usec,
5210 * too. */
5211 spec.t11_t12 = (510 + 100) * 10;
5212
5213 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5214 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5215
5216 /* Use the max of the register settings and vbt. If both are
5217 * unset, fall back to the spec limits. */
36b5f425 5218#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5219 spec.field : \
5220 max(cur.field, vbt.field))
5221 assign_final(t1_t3);
5222 assign_final(t8);
5223 assign_final(t9);
5224 assign_final(t10);
5225 assign_final(t11_t12);
5226#undef assign_final
5227
36b5f425 5228#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5229 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5230 intel_dp->backlight_on_delay = get_delay(t8);
5231 intel_dp->backlight_off_delay = get_delay(t9);
5232 intel_dp->panel_power_down_delay = get_delay(t10);
5233 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5234#undef get_delay
5235
f30d26e4
JN
5236 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5237 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5238 intel_dp->panel_power_cycle_delay);
5239
5240 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5241 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5242}
5243
5244static void
5245intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5246 struct intel_dp *intel_dp)
f30d26e4
JN
5247{
5248 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5249 u32 pp_on, pp_off, pp_div, port_sel = 0;
5250 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5251 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5252 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5253 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5254
e39b999a 5255 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5256
b0a08bec
VK
5257 if (IS_BROXTON(dev)) {
5258 /*
5259 * TODO: BXT has 2 sets of PPS registers.
5260 * Correct Register for Broxton need to be identified
5261 * using VBT. hardcoding for now
5262 */
5263 pp_ctrl_reg = BXT_PP_CONTROL(0);
5264 pp_on_reg = BXT_PP_ON_DELAYS(0);
5265 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5266
5267 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5268 pp_on_reg = PCH_PP_ON_DELAYS;
5269 pp_off_reg = PCH_PP_OFF_DELAYS;
5270 pp_div_reg = PCH_PP_DIVISOR;
5271 } else {
bf13e81b
JN
5272 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5273
5274 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5275 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5276 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5277 }
5278
b2f19d1a
PZ
5279 /*
5280 * And finally store the new values in the power sequencer. The
5281 * backlight delays are set to 1 because we do manual waits on them. For
5282 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5283 * we'll end up waiting for the backlight off delay twice: once when we
5284 * do the manual sleep, and once when we disable the panel and wait for
5285 * the PP_STATUS bit to become zero.
5286 */
f30d26e4 5287 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5288 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5289 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5290 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5291 /* Compute the divisor for the pp clock, simply match the Bspec
5292 * formula. */
b0a08bec
VK
5293 if (IS_BROXTON(dev)) {
5294 pp_div = I915_READ(pp_ctrl_reg);
5295 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5296 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5297 << BXT_POWER_CYCLE_DELAY_SHIFT);
5298 } else {
5299 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5300 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5301 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5302 }
67a54566
DV
5303
5304 /* Haswell doesn't have any port selection bits for the panel
5305 * power sequencer any more. */
bc7d38a4 5306 if (IS_VALLEYVIEW(dev)) {
ad933b56 5307 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5308 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5309 if (port == PORT_A)
a24c144c 5310 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5311 else
a24c144c 5312 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5313 }
5314
453c5420
JB
5315 pp_on |= port_sel;
5316
5317 I915_WRITE(pp_on_reg, pp_on);
5318 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5319 if (IS_BROXTON(dev))
5320 I915_WRITE(pp_ctrl_reg, pp_div);
5321 else
5322 I915_WRITE(pp_div_reg, pp_div);
67a54566 5323
67a54566 5324 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5325 I915_READ(pp_on_reg),
5326 I915_READ(pp_off_reg),
b0a08bec
VK
5327 IS_BROXTON(dev) ?
5328 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5329 I915_READ(pp_div_reg));
f684960e
CW
5330}
5331
b33a2815
VK
5332/**
5333 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5334 * @dev: DRM device
5335 * @refresh_rate: RR to be programmed
5336 *
5337 * This function gets called when refresh rate (RR) has to be changed from
5338 * one frequency to another. Switches can be between high and low RR
5339 * supported by the panel or to any other RR based on media playback (in
5340 * this case, RR value needs to be passed from user space).
5341 *
5342 * The caller of this function needs to take a lock on dev_priv->drrs.
5343 */
96178eeb 5344static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5345{
5346 struct drm_i915_private *dev_priv = dev->dev_private;
5347 struct intel_encoder *encoder;
96178eeb
VK
5348 struct intel_digital_port *dig_port = NULL;
5349 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5350 struct intel_crtc_state *config = NULL;
439d7ac0 5351 struct intel_crtc *intel_crtc = NULL;
96178eeb 5352 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5353
5354 if (refresh_rate <= 0) {
5355 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5356 return;
5357 }
5358
96178eeb
VK
5359 if (intel_dp == NULL) {
5360 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5361 return;
5362 }
5363
1fcc9d1c 5364 /*
e4d59f6b
RV
5365 * FIXME: This needs proper synchronization with psr state for some
5366 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5367 */
439d7ac0 5368
96178eeb
VK
5369 dig_port = dp_to_dig_port(intel_dp);
5370 encoder = &dig_port->base;
723f9aab 5371 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5372
5373 if (!intel_crtc) {
5374 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5375 return;
5376 }
5377
6e3c9717 5378 config = intel_crtc->config;
439d7ac0 5379
96178eeb 5380 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5381 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5382 return;
5383 }
5384
96178eeb
VK
5385 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5386 refresh_rate)
439d7ac0
PB
5387 index = DRRS_LOW_RR;
5388
96178eeb 5389 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5390 DRM_DEBUG_KMS(
5391 "DRRS requested for previously set RR...ignoring\n");
5392 return;
5393 }
5394
5395 if (!intel_crtc->active) {
5396 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5397 return;
5398 }
5399
44395bfe 5400 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5401 switch (index) {
5402 case DRRS_HIGH_RR:
5403 intel_dp_set_m_n(intel_crtc, M1_N1);
5404 break;
5405 case DRRS_LOW_RR:
5406 intel_dp_set_m_n(intel_crtc, M2_N2);
5407 break;
5408 case DRRS_MAX_RR:
5409 default:
5410 DRM_ERROR("Unsupported refreshrate type\n");
5411 }
5412 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5413 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5414 u32 val;
a4c30b1d 5415
649636ef 5416 val = I915_READ(reg);
439d7ac0 5417 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5418 if (IS_VALLEYVIEW(dev))
5419 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5420 else
5421 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5422 } else {
6fa7aec1
VK
5423 if (IS_VALLEYVIEW(dev))
5424 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5425 else
5426 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5427 }
5428 I915_WRITE(reg, val);
5429 }
5430
4e9ac947
VK
5431 dev_priv->drrs.refresh_rate_type = index;
5432
5433 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5434}
5435
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp under drrs.mutex. drrs.dp
 * doubles as the "DRRS currently enabled" flag for the rest of the
 * frontbuffer-tracking hooks.
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Nothing to do if the current CRTC configuration lacks DRRS. */
	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Enabling twice without a disable is a driver state-machine bug. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	/* Start with a clean slate: no planes are considered busy yet. */
	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5468
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the fixed-mode (high) refresh rate if the panel was
 * downclocked, clears drrs.dp and cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	/* Already disabled (or never enabled) - nothing to undo. */
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* If the panel is currently downclocked, go back to the full rate. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Cancel after dropping the mutex: the work item itself takes
	 * drrs.mutex, so a _sync cancel under the lock would deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5501
/*
 * Delayed work that downclocks the panel (HIGH_RR -> LOW_RR) once the
 * screen has been idle for the scheduling delay. Scheduled from
 * intel_edp_drrs_flush() when no frontbuffer bits remain busy.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	/* Only switch if we are not already at the low refresh rate. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5531
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Rendering started: the pending downclock is no longer wanted. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track dirt on the pipe driving the DRRS panel. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5574
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Any previously scheduled downclock is re-evaluated below. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Clear the flushed planes from the busy set for this pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5626
b33a2815
VK
5627/**
5628 * DOC: Display Refresh Rate Switching (DRRS)
5629 *
5630 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5631 * which enables swtching between low and high refresh rates,
5632 * dynamically, based on the usage scenario. This feature is applicable
5633 * for internal panels.
5634 *
5635 * Indication that the panel supports DRRS is given by the panel EDID, which
5636 * would list multiple refresh rates for one resolution.
5637 *
5638 * DRRS is of 2 types - static and seamless.
5639 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5640 * (may appear as a blink on screen) and is used in dock-undock scenario.
5641 * Seamless DRRS involves changing RR without any visual effect to the user
5642 * and can be used during normal system usage. This is done by programming
5643 * certain registers.
5644 *
5645 * Support for static/seamless DRRS may be indicated in the VBT based on
5646 * inputs from the panel spec.
5647 *
5648 * DRRS saves power by switching to low RR based on usage scenarios.
5649 *
5650 * eDP DRRS:-
5651 * The implementation is based on frontbuffer tracking implementation.
5652 * When there is a disturbance on the screen triggered by user activity or a
5653 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5654 * When there is no movement on screen, after a timeout of 1 second, a switch
5655 * to low RR is made.
5656 * For integration with frontbuffer tracking code,
5657 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5658 *
5659 * DRRS can be further extended to support other internal panels and also
5660 * the scenario of video playback wherein RR is set based on the rate
5661 * requested by userspace.
5662 */
5663
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/*
	 * Work and mutex are set up unconditionally so the frontbuffer
	 * hooks can take drrs.mutex even when DRRS ends up unsupported.
	 */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* Seamless DRRS requires gen7+ hardware. */
	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* The panel EDID must advertise a lower-refresh-rate mode. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panels come up at the full (fixed mode) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5713
/*
 * One-time eDP-specific connector setup: sanitize VDD state, cache the
 * DPCD and EDID, pick the panel's fixed (and optional DRRS downclock)
 * mode, and initialize panel/backlight state. Returns false if the
 * sink looks like a ghost (no DPCD), true otherwise. No-op (true) for
 * non-eDP connectors.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	/* Reconcile any VDD left enabled by the BIOS before touching AUX. */
	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unusable EDID: keep an error sentinel, not NULL. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5820
/*
 * Initialize the DP/eDP connector for a digital port: set up the
 * per-platform AUX vfuncs, create and register the DRM connector,
 * configure the hotplug pin, the panel power sequencer (eDP), AUX
 * channel and MST. Returns true on success; on failure the connector
 * is torn down again and false is returned so the caller can clean up
 * the encoder.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping routes port B hotplug to the port A pin. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Unwind AUX and MST before the common failure path. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
/*
 * Allocate and wire up a DP digital port: digital port + connector
 * allocation, encoder init with per-platform enable/disable hooks,
 * crtc mask, and hotplug registration, then hand off to
 * intel_dp_init_connector(). All allocations are released on the
 * goto-cleanup failure paths; the function reports no error to the
 * caller (the port simply doesn't come up).
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Enable/disable sequencing differs per platform generation. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* CHV port D lives on pipe C only; other ports on pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
0e32b39c
DA
6057
6058void intel_dp_mst_suspend(struct drm_device *dev)
6059{
6060 struct drm_i915_private *dev_priv = dev->dev_private;
6061 int i;
6062
6063 /* disable MST */
6064 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6065 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6066 if (!intel_dig_port)
6067 continue;
6068
6069 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6070 if (!intel_dig_port->dp.can_mst)
6071 continue;
6072 if (intel_dig_port->dp.is_mst)
6073 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6074 }
6075 }
6076}
6077
6078void intel_dp_mst_resume(struct drm_device *dev)
6079{
6080 struct drm_i915_private *dev_priv = dev->dev_private;
6081 int i;
6082
6083 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6084 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6085 if (!intel_dig_port)
6086 continue;
6087 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6088 int ret;
6089
6090 if (!intel_dig_port->dp.can_mst)
6091 continue;
6092
6093 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6094 if (ret != 0) {
6095 intel_dp_check_mst_status(&intel_dig_port->dp);
6096 }
6097 }
6098 }
6099}