/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

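/*
 * Worked example for the two helpers above (illustrative only; the
 * 148500 kHz figure assumes a standard 1920x1080@60 mode): at 24 bpp,
 * intel_dp_link_required() gives (148500 * 24 + 9) / 10 == 356400
 * decakilobits, while intel_dp_max_data_rate() for a 2.7GHz link with
 * 4 lanes gives (270000 * 4 * 8) / 10 == 864000, so that mode fits
 * with plenty of margin.
 */
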
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

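/*
 * Illustrative numbers for intel_dp_mode_valid() above (assuming a sink
 * limited to 2.7GHz x 4 lanes): a 594000 kHz pixel clock (4Kp60) needs
 * (594000 * 18 + 9) / 10 == 1069200 decakilobits even at the minimum
 * 18 bpp, which exceeds the (270000 * 4 * 8) / 10 == 864000 the link
 * can carry, so such a mode is reported as MODE_CLOCK_HIGH.
 */
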
183static uint32_t
184pack_aux(uint8_t *src, int src_bytes)
185{
186 int i;
187 uint32_t v = 0;
188
189 if (src_bytes > 4)
190 src_bytes = 4;
191 for (i = 0; i < src_bytes; i++)
192 v |= ((uint32_t) src[i]) << ((3-i) * 8);
193 return v;
194}
195
196static void
197unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
198{
199 int i;
200 if (dst_bytes > 4)
201 dst_bytes = 4;
202 for (i = 0; i < dst_bytes; i++)
203 dst[i] = src >> ((3-i) * 8);
204}
205
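/*
 * Byte-ordering sketch for pack_aux()/unpack_aux() above (illustrative):
 * the AUX data registers hold big-endian 32-bit words, so pack_aux() of
 * the three bytes { 0x11, 0x22, 0x33 } yields 0x11223300, and
 * unpack_aux() of that value restores the same three bytes.
 */
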
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *out);

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
				      int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (IS_VALLEYVIEW(dev)) {
		return index ? 0 : 100;
	} else if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		if (HAS_DDI(dev))
			return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else if (HAS_PCH_SPLIT(dev)) {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		return index ? 0 : intel_hrawclk(dev) / 2;
	}
}

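/*
 * Sanity check on the divider math above (illustrative; assumes the
 * 125MHz PCH reference clock the comment mentions): DIV_ROUND_UP(125, 2)
 * == 63, which matches the value the non-ULT HSW workaround hardcodes
 * for index 0, and a 200MHz hrawclk (the VLV case) gives 200 / 2 == 100,
 * matching the value returned directly for Valleyview.
 */
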
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, precharge, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	uint32_t timeout;

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl,
				   DP_AUX_CH_CTL_SEND_BUSY |
				   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
				   timeout |
				   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
				   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
				   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	return ret;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;
	int retry;

	if (WARN_ON(send_bytes > 16))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		ack >>= 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			return send_bytes;
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;
	int retry;

	if (WARN_ON(recv_bytes > 19))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0] >> 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}

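/*
 * Usage sketch for the native AUX helpers above (illustrative only; the
 * real callers appear later in this file):
 *
 *	uint8_t rev;
 *
 *	if (intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, &rev, 1) == 1)
 *		DRM_DEBUG_KMS("DPCD rev %#x\n", rev);
 *
 * A negative return is a failed transaction; a positive return is the
 * number of bytes the sink actually replied with.
 */
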
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = reply_bytes - 1;
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

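/*
 * Message layout sketch for intel_dp_i2c_aux_ch() above (illustrative;
 * assumes an EDID byte read at i2c address 0x50 with MOT set): msg[0]
 * carries (DP_AUX_I2C_READ | DP_AUX_I2C_MOT) << 4, msg[1]/msg[2] hold
 * the 16-bit address 0x0050, msg[3] is the length field 0, and the
 * sink's reply returns the AUX reply code in reply[0] with the data
 * byte in reply[1].
 */
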
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.kdev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	return ret;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_config *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (IS_HASWELL(dev)) {
		/* Haswell has special-purpose DP DDI clocks. */
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    dev_priv->vbt.edp_bpp < bpp) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

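/*
 * Worked example of the link search in intel_dp_compute_config() above
 * (illustrative; assumes a 148500 kHz mode at 24 bpp and a sink that
 * reports 4 lanes): the required rate is 356400, and the inner loops
 * try 1.62GHz x1 (129600), x2 (259200) and then x4 (518400), so the
 * search settles on DP_LINK_BW_1_62 with 4 lanes before ever
 * considering the 2.7GHz rate.
 */
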
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}

#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp))
		return;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		if ((pp & POWER_TARGET_ON) == 0)
			msleep(intel_dp->panel_power_cycle_delay);

		intel_runtime_pm_put(dev_priv);
	}
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

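/*
 * Pairing sketch for the two VDD helpers above (illustrative): a typical
 * caller brackets its AUX traffic the way intel_dp_i2c_aux_ch() does,
 *
 *	ironlake_edp_panel_vdd_on(intel_dp);
 *	... AUX transactions ...
 *	ironlake_edp_panel_vdd_off(intel_dp, false);
 *
 * with sync == false so the actual power-down is deferred to
 * ironlake_panel_vdd_work() and VDD stays up across a burst of
 * transactions.
 */
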
82a4d9c0 1196void ironlake_edp_panel_on(struct intel_dp *intel_dp)
9934c132 1197{
30add22d 1198 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1199 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1200 u32 pp;
453c5420 1201 u32 pp_ctrl_reg;
9934c132 1202
97af61f5 1203 if (!is_edp(intel_dp))
bd943159 1204 return;
99ea7127
KP
1205
1206 DRM_DEBUG_KMS("Turn eDP power on\n");
1207
1208 if (ironlake_edp_have_panel_power(intel_dp)) {
1209 DRM_DEBUG_KMS("eDP power already on\n");
7d639f35 1210 return;
99ea7127 1211 }
9934c132 1212
99ea7127 1213 ironlake_wait_panel_power_cycle(intel_dp);
37c6c9b0 1214
bf13e81b 1215 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1216 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1217 if (IS_GEN5(dev)) {
1218 /* ILK workaround: disable reset around power sequence */
1219 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1220 I915_WRITE(pp_ctrl_reg, pp);
1221 POSTING_READ(pp_ctrl_reg);
05ce1a49 1222 }
37c6c9b0 1223
1c0ae80a 1224 pp |= POWER_TARGET_ON;
99ea7127
KP
1225 if (!IS_GEN5(dev))
1226 pp |= PANEL_POWER_RESET;
1227
453c5420
JB
1228 I915_WRITE(pp_ctrl_reg, pp);
1229 POSTING_READ(pp_ctrl_reg);
9934c132 1230
99ea7127 1231 ironlake_wait_panel_on(intel_dp);
9934c132 1232
05ce1a49
KP
1233 if (IS_GEN5(dev)) {
1234 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1235 I915_WRITE(pp_ctrl_reg, pp);
1236 POSTING_READ(pp_ctrl_reg);
05ce1a49 1237 }
9934c132
JB
1238}
1239
82a4d9c0 1240void ironlake_edp_panel_off(struct intel_dp *intel_dp)
9934c132 1241{
30add22d 1242 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1243 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1244 u32 pp;
453c5420 1245 u32 pp_ctrl_reg;
9934c132 1246
97af61f5
KP
1247 if (!is_edp(intel_dp))
1248 return;
37c6c9b0 1249
99ea7127 1250 DRM_DEBUG_KMS("Turn eDP power off\n");
37c6c9b0 1251
82593830
JN
1252 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1253
453c5420 1254 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1255 /* We need to switch off panel power _and_ force vdd, for otherwise some
1256 * panels get very unhappy and cease to work. */
82593830 1257 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
453c5420 1258
bf13e81b 1259 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1260
1261 I915_WRITE(pp_ctrl_reg, pp);
1262 POSTING_READ(pp_ctrl_reg);
9934c132 1263
82593830
JN
1264 intel_dp->want_panel_vdd = false;
1265
99ea7127 1266 ironlake_wait_panel_off(intel_dp);
82593830
JN
1267
1268 /* We got a reference when we enabled the VDD. */
1269 intel_runtime_pm_put(dev_priv);
9934c132
JB
1270}
1271
d6c50ff8 1272void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1273{
da63a9f2
PZ
1274 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1275 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1276 struct drm_i915_private *dev_priv = dev->dev_private;
1277 u32 pp;
453c5420 1278 u32 pp_ctrl_reg;
32f9d658 1279
f01eca2e
KP
1280 if (!is_edp(intel_dp))
1281 return;
1282
28c97730 1283 DRM_DEBUG_KMS("\n");
01cb9ea6
JB
1284 /*
1285 * If we enable the backlight right away following a panel power
1286 * on, we may see slight flicker as the panel syncs with the eDP
1287 * link. So delay a bit to make sure the image is solid before
1288 * allowing it to appear.
1289 */
f01eca2e 1290 msleep(intel_dp->backlight_on_delay);
453c5420 1291 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1292 pp |= EDP_BLC_ENABLE;
453c5420 1293
bf13e81b 1294 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1295
1296 I915_WRITE(pp_ctrl_reg, pp);
1297 POSTING_READ(pp_ctrl_reg);
035aa3de 1298
752aa88a 1299 intel_panel_enable_backlight(intel_dp->attached_connector);
32f9d658
ZW
1300}
1301
d6c50ff8 1302void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1303{
30add22d 1304 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1305 struct drm_i915_private *dev_priv = dev->dev_private;
1306 u32 pp;
453c5420 1307 u32 pp_ctrl_reg;
32f9d658 1308
f01eca2e
KP
1309 if (!is_edp(intel_dp))
1310 return;
1311
752aa88a 1312 intel_panel_disable_backlight(intel_dp->attached_connector);
035aa3de 1313
28c97730 1314 DRM_DEBUG_KMS("\n");
453c5420 1315 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1316 pp &= ~EDP_BLC_ENABLE;
453c5420 1317
bf13e81b 1318 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1319
1320 I915_WRITE(pp_ctrl_reg, pp);
1321 POSTING_READ(pp_ctrl_reg);
f01eca2e 1322 msleep(intel_dp->backlight_off_delay);
32f9d658 1323}
a4fc5ed6 1324
2bd2ad64 1325static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 1326{
da63a9f2
PZ
1327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1328 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1329 struct drm_device *dev = crtc->dev;
d240f20f
JB
1330 struct drm_i915_private *dev_priv = dev->dev_private;
1331 u32 dpa_ctl;
1332
2bd2ad64
DV
1333 assert_pipe_disabled(dev_priv,
1334 to_intel_crtc(crtc)->pipe);
1335
d240f20f
JB
1336 DRM_DEBUG_KMS("\n");
1337 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1338 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1339 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1340
1341 /* We don't adjust intel_dp->DP while tearing down the link, to
1342 * facilitate link retraining (e.g. after hotplug). Hence clear all
1343 * enable bits here to ensure that we don't enable too much. */
1344 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1345 intel_dp->DP |= DP_PLL_ENABLE;
1346 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
1347 POSTING_READ(DP_A);
1348 udelay(200);
d240f20f
JB
1349}
1350
2bd2ad64 1351static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 1352{
da63a9f2
PZ
1353 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1354 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1355 struct drm_device *dev = crtc->dev;
d240f20f
JB
1356 struct drm_i915_private *dev_priv = dev->dev_private;
1357 u32 dpa_ctl;
1358
2bd2ad64
DV
1359 assert_pipe_disabled(dev_priv,
1360 to_intel_crtc(crtc)->pipe);
1361
d240f20f 1362 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1363 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1364 "dp pll off, should be on\n");
1365 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1366
1367 /* We can't rely on the value tracked for the DP register in
1368 * intel_dp->DP because link_down must not change that (otherwise link
1369 * re-training will fail. */
298b0b39 1370 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 1371 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 1372 POSTING_READ(DP_A);
d240f20f
JB
1373 udelay(200);
1374}
1375
c7ad3810 1376/* If the sink supports it, try to set the power state appropriately */
c19b0669 1377void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
1378{
1379 int ret, i;
1380
1381 /* Should have a valid DPCD by this point */
1382 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1383 return;
1384
1385 if (mode != DRM_MODE_DPMS_ON) {
1386 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1387 DP_SET_POWER_D3);
1388 if (ret != 1)
1389 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1390 } else {
1391 /*
1392 * When turning on, we need to retry for 1ms to give the sink
1393 * time to wake up.
1394 */
1395 for (i = 0; i < 3; i++) {
1396 ret = intel_dp_aux_native_write_1(intel_dp,
1397 DP_SET_POWER,
1398 DP_SET_POWER_D0);
1399 if (ret == 1)
1400 break;
1401 msleep(1);
1402 }
1403 }
1404}
1405
19d8fe15
DV
1406static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1407 enum pipe *pipe)
d240f20f 1408{
19d8fe15 1409 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1410 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
1411 struct drm_device *dev = encoder->base.dev;
1412 struct drm_i915_private *dev_priv = dev->dev_private;
1413 u32 tmp = I915_READ(intel_dp->output_reg);
1414
1415 if (!(tmp & DP_PORT_EN))
1416 return false;
1417
bc7d38a4 1418 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 1419 *pipe = PORT_TO_PIPE_CPT(tmp);
bc7d38a4 1420 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
1421 *pipe = PORT_TO_PIPE(tmp);
1422 } else {
1423 u32 trans_sel;
1424 u32 trans_dp;
1425 int i;
1426
1427 switch (intel_dp->output_reg) {
1428 case PCH_DP_B:
1429 trans_sel = TRANS_DP_PORT_SEL_B;
1430 break;
1431 case PCH_DP_C:
1432 trans_sel = TRANS_DP_PORT_SEL_C;
1433 break;
1434 case PCH_DP_D:
1435 trans_sel = TRANS_DP_PORT_SEL_D;
1436 break;
1437 default:
1438 return true;
1439 }
1440
1441 for_each_pipe(i) {
1442 trans_dp = I915_READ(TRANS_DP_CTL(i));
1443 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1444 *pipe = i;
1445 return true;
1446 }
1447 }
19d8fe15 1448
4a0833ec
DV
1449 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1450 intel_dp->output_reg);
1451 }
d240f20f 1452
19d8fe15
DV
1453 return true;
1454}
d240f20f 1455
045ac3b5
JB
1456static void intel_dp_get_config(struct intel_encoder *encoder,
1457 struct intel_crtc_config *pipe_config)
1458{
1459 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 1460 u32 tmp, flags = 0;
63000ef6
XZ
1461 struct drm_device *dev = encoder->base.dev;
1462 struct drm_i915_private *dev_priv = dev->dev_private;
1463 enum port port = dp_to_dig_port(intel_dp)->port;
1464 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 1465 int dotclock;
045ac3b5 1466
63000ef6
XZ
1467 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1468 tmp = I915_READ(intel_dp->output_reg);
1469 if (tmp & DP_SYNC_HS_HIGH)
1470 flags |= DRM_MODE_FLAG_PHSYNC;
1471 else
1472 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 1473
63000ef6
XZ
1474 if (tmp & DP_SYNC_VS_HIGH)
1475 flags |= DRM_MODE_FLAG_PVSYNC;
1476 else
1477 flags |= DRM_MODE_FLAG_NVSYNC;
1478 } else {
1479 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1480 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
1481 flags |= DRM_MODE_FLAG_PHSYNC;
1482 else
1483 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 1484
63000ef6
XZ
1485 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
1486 flags |= DRM_MODE_FLAG_PVSYNC;
1487 else
1488 flags |= DRM_MODE_FLAG_NVSYNC;
1489 }
045ac3b5
JB
1490
1491 pipe_config->adjusted_mode.flags |= flags;
f1f644dc 1492
eb14cb74
VS
1493 pipe_config->has_dp_encoder = true;
1494
1495 intel_dp_get_m_n(crtc, pipe_config);
1496
18442d08 1497 if (port == PORT_A) {
f1f644dc
JB
1498 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1499 pipe_config->port_clock = 162000;
1500 else
1501 pipe_config->port_clock = 270000;
1502 }
18442d08
VS
1503
1504 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1505 &pipe_config->dp_m_n);
1506
1507 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1508 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1509
241bfc38 1510 pipe_config->adjusted_mode.crtc_clock = dotclock;
7f16e5c1 1511
c6cd2ee2
JN
1512 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1513 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1514 /*
1515 * This is a big fat ugly hack.
1516 *
1517 * Some machines in UEFI boot mode provide us a VBT that has 18
1518 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1519 * unknown we fail to light up. Yet the same BIOS boots up with
1520 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1521 * max, not what it tells us to use.
1522 *
1523 * Note: This will still be broken if the eDP panel is not lit
1524 * up by the BIOS, and thus we can't get the mode at module
1525 * load.
1526 */
1527 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1528 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1529 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1530 }
045ac3b5
JB
1531}
1532
a031d709 1533static bool is_edp_psr(struct drm_device *dev)
2293bb5c 1534{
a031d709
RV
1535 struct drm_i915_private *dev_priv = dev->dev_private;
1536
1537 return dev_priv->psr.sink_support;
2293bb5c
SK
1538}
1539
2b28bb1b
RV
1540static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1541{
1542 struct drm_i915_private *dev_priv = dev->dev_private;
1543
18b5992c 1544 if (!HAS_PSR(dev))
2b28bb1b
RV
1545 return false;
1546
18b5992c 1547 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2b28bb1b
RV
1548}
1549
1550static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1551 struct edp_vsc_psr *vsc_psr)
1552{
1553 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1554 struct drm_device *dev = dig_port->base.base.dev;
1555 struct drm_i915_private *dev_priv = dev->dev_private;
1556 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1557 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1558 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1559 uint32_t *data = (uint32_t *) vsc_psr;
1560 unsigned int i;
1561
1562 /* As per BSPec (Pipe Video Data Island Packet), we need to disable
1563 the video DIP being updated before program video DIP data buffer
1564 registers for DIP being updated. */
1565 I915_WRITE(ctl_reg, 0);
1566 POSTING_READ(ctl_reg);
1567
1568 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1569 if (i < sizeof(struct edp_vsc_psr))
1570 I915_WRITE(data_reg + i, *data++);
1571 else
1572 I915_WRITE(data_reg + i, 0);
1573 }
1574
1575 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1576 POSTING_READ(ctl_reg);
1577}
1578
1579static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1580{
1581 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1582 struct drm_i915_private *dev_priv = dev->dev_private;
1583 struct edp_vsc_psr psr_vsc;
1584
1585 if (intel_dp->psr_setup_done)
1586 return;
1587
1588 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1589 memset(&psr_vsc, 0, sizeof(psr_vsc));
1590 psr_vsc.sdp_header.HB0 = 0;
1591 psr_vsc.sdp_header.HB1 = 0x7;
1592 psr_vsc.sdp_header.HB2 = 0x2;
1593 psr_vsc.sdp_header.HB3 = 0x8;
1594 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1595
1596 /* Avoid continuous PSR exit by masking memup and hpd */
18b5992c 1597 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
0cc4b699 1598 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
2b28bb1b
RV
1599
1600 intel_dp->psr_setup_done = true;
1601}
1602
1603static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1604{
1605 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1606 struct drm_i915_private *dev_priv = dev->dev_private;
bc86625a 1607 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
2b28bb1b
RV
1608 int precharge = 0x3;
1609 int msg_size = 5; /* Header(4) + Message(1) */
1610
1611 /* Enable PSR in sink */
1612 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1613 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1614 DP_PSR_ENABLE &
1615 ~DP_PSR_MAIN_LINK_ACTIVE);
1616 else
1617 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1618 DP_PSR_ENABLE |
1619 DP_PSR_MAIN_LINK_ACTIVE);
1620
1621 /* Setup AUX registers */
18b5992c
BW
1622 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1623 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1624 I915_WRITE(EDP_PSR_AUX_CTL(dev),
2b28bb1b
RV
1625 DP_AUX_CH_CTL_TIME_OUT_400us |
1626 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1627 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1628 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1629}
1630
1631static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1632{
1633 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1634 struct drm_i915_private *dev_priv = dev->dev_private;
1635 uint32_t max_sleep_time = 0x1f;
1636 uint32_t idle_frames = 1;
1637 uint32_t val = 0x0;
ed8546ac 1638 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
2b28bb1b
RV
1639
1640 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1641 val |= EDP_PSR_LINK_STANDBY;
1642 val |= EDP_PSR_TP2_TP3_TIME_0us;
1643 val |= EDP_PSR_TP1_TIME_0us;
1644 val |= EDP_PSR_SKIP_AUX_EXIT;
1645 } else
1646 val |= EDP_PSR_LINK_DISABLE;
1647
18b5992c 1648 I915_WRITE(EDP_PSR_CTL(dev), val |
24bd9bf5 1649 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
2b28bb1b
RV
1650 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1651 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1652 EDP_PSR_ENABLE);
1653}
1654
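/*
 * Source-side PSR entry conditions, roughly in the order they are checked
 * below: the platform must have PSR, the output must be eDP on DDI/port A,
 * the i915_enable_psr module parameter must be set, the crtc must exist
 * and be active, the framebuffer must be X-tiled and fenced, and sprites,
 * stereo 3D and interlaced modes must all be off.
 * dev_priv->psr.source_ok records the result.
 */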
3f51e471
RV
1655static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1656{
1657 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1658 struct drm_device *dev = dig_port->base.base.dev;
1659 struct drm_i915_private *dev_priv = dev->dev_private;
1660 struct drm_crtc *crtc = dig_port->base.base.crtc;
1661 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1662 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1663 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1664
a031d709
RV
1665 dev_priv->psr.source_ok = false;
1666
18b5992c 1667 if (!HAS_PSR(dev)) {
3f51e471 1668 DRM_DEBUG_KMS("PSR not supported on this platform\n");
3f51e471
RV
1669 return false;
1670 }
1671
1672 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1673 (dig_port->port != PORT_A)) {
1674 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
3f51e471
RV
1675 return false;
1676 }
1677
105b7c11
RV
1678 if (!i915_enable_psr) {
1679 DRM_DEBUG_KMS("PSR disable by flag\n");
105b7c11
RV
1680 return false;
1681 }
1682
cd234b0b
CW
1683 crtc = dig_port->base.base.crtc;
1684 if (crtc == NULL) {
1685 DRM_DEBUG_KMS("crtc not active for PSR\n");
cd234b0b
CW
1686 return false;
1687 }
1688
1689 intel_crtc = to_intel_crtc(crtc);
20ddf665 1690 if (!intel_crtc_active(crtc)) {
3f51e471 1691 DRM_DEBUG_KMS("crtc not active for PSR\n");
3f51e471
RV
1692 return false;
1693 }
1694
cd234b0b 1695 obj = to_intel_framebuffer(crtc->fb)->obj;
3f51e471
RV
1696 if (obj->tiling_mode != I915_TILING_X ||
1697 obj->fence_reg == I915_FENCE_REG_NONE) {
1698 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
3f51e471
RV
1699 return false;
1700 }
1701
1702 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1703 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
3f51e471
RV
1704 return false;
1705 }
1706
1707 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1708 S3D_ENABLE) {
1709 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
3f51e471
RV
1710 return false;
1711 }
1712
ca73b4f0 1713 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
3f51e471 1714 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
3f51e471
RV
1715 return false;
1716 }
1717
a031d709 1718 dev_priv->psr.source_ok = true;
3f51e471
RV
1719 return true;
1720}
1721
3d739d92 1722static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
2b28bb1b
RV
1723{
1724 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1725
3f51e471
RV
1726 if (!intel_edp_psr_match_conditions(intel_dp) ||
1727 intel_edp_is_psr_enabled(dev))
2b28bb1b
RV
1728 return;
1729
1730 /* Setup PSR once */
1731 intel_edp_psr_setup(intel_dp);
1732
1733 /* Enable PSR on the panel */
1734 intel_edp_psr_enable_sink(intel_dp);
1735
1736 /* Enable PSR on the host */
1737 intel_edp_psr_enable_source(intel_dp);
1738}
1739
3d739d92
RV
1740void intel_edp_psr_enable(struct intel_dp *intel_dp)
1741{
1742 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1743
1744 if (intel_edp_psr_match_conditions(intel_dp) &&
1745 !intel_edp_is_psr_enabled(dev))
1746 intel_edp_psr_do_enable(intel_dp);
1747}
1748
2b28bb1b
RV
1749void intel_edp_psr_disable(struct intel_dp *intel_dp)
1750{
1751 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1752 struct drm_i915_private *dev_priv = dev->dev_private;
1753
1754 if (!intel_edp_is_psr_enabled(dev))
1755 return;
1756
18b5992c
BW
1757 I915_WRITE(EDP_PSR_CTL(dev),
1758 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
2b28bb1b
RV
1759
1760 /* Wait till PSR is idle */
18b5992c 1761 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
2b28bb1b
RV
1762 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1763 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1764}
1765
3d739d92
RV
1766void intel_edp_psr_update(struct drm_device *dev)
1767{
1768 struct intel_encoder *encoder;
1769 struct intel_dp *intel_dp = NULL;
1770
1771 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
1772 if (encoder->type == INTEL_OUTPUT_EDP) {
1773 intel_dp = enc_to_intel_dp(&encoder->base);
1774
a031d709 1775 if (!is_edp_psr(dev))
3d739d92
RV
1776 return;
1777
1778 if (!intel_edp_psr_match_conditions(intel_dp))
1779 intel_edp_psr_disable(intel_dp);
1780 else
1781 if (!intel_edp_is_psr_enabled(dev))
1782 intel_edp_psr_do_enable(intel_dp);
1783 }
1784}
1785
e8cb4558 1786static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 1787{
e8cb4558 1788 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866
ID
1789 enum port port = dp_to_dig_port(intel_dp)->port;
1790 struct drm_device *dev = encoder->base.dev;
6cb49835
DV
1791
1792 /* Make sure the panel is off before trying to change the mode. But also
1793 * ensure that we have vdd while we switch off the panel. */
82593830 1794 ironlake_edp_panel_vdd_on(intel_dp);
21264c63 1795 ironlake_edp_backlight_off(intel_dp);
fdbc3b1f 1796 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
35a38556 1797 ironlake_edp_panel_off(intel_dp);
3739850b
DV
1798
 1799 /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
982a3866 1800 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
3739850b 1801 intel_dp_link_down(intel_dp);
d240f20f
JB
1802}
1803
2bd2ad64 1804static void intel_post_disable_dp(struct intel_encoder *encoder)
d240f20f 1805{
2bd2ad64 1806 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 1807 enum port port = dp_to_dig_port(intel_dp)->port;
b2634017 1808 struct drm_device *dev = encoder->base.dev;
2bd2ad64 1809
982a3866 1810 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
3739850b 1811 intel_dp_link_down(intel_dp);
b2634017
JB
1812 if (!IS_VALLEYVIEW(dev))
1813 ironlake_edp_pll_off(intel_dp);
3739850b 1814 }
2bd2ad64
DV
1815}
1816
e8cb4558 1817static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 1818{
e8cb4558
DV
1819 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1820 struct drm_device *dev = encoder->base.dev;
1821 struct drm_i915_private *dev_priv = dev->dev_private;
1822 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 1823
0c33d8d7
DV
1824 if (WARN_ON(dp_reg & DP_PORT_EN))
1825 return;
5d613501 1826
97af61f5 1827 ironlake_edp_panel_vdd_on(intel_dp);
f01eca2e 1828 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 1829 intel_dp_start_link_train(intel_dp);
97af61f5 1830 ironlake_edp_panel_on(intel_dp);
bd943159 1831 ironlake_edp_panel_vdd_off(intel_dp, true);
33a34e4e 1832 intel_dp_complete_link_train(intel_dp);
3ab9c637 1833 intel_dp_stop_link_train(intel_dp);
ab1f90f9 1834}
89b667f8 1835
ecff4f3b
JN
1836static void g4x_enable_dp(struct intel_encoder *encoder)
1837{
828f5c6e
JN
1838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1839
ecff4f3b 1840 intel_enable_dp(encoder);
f01eca2e 1841 ironlake_edp_backlight_on(intel_dp);
ab1f90f9 1842}
89b667f8 1843
ab1f90f9
JN
1844static void vlv_enable_dp(struct intel_encoder *encoder)
1845{
828f5c6e
JN
1846 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1847
1848 ironlake_edp_backlight_on(intel_dp);
d240f20f
JB
1849}
1850
ecff4f3b 1851static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
1852{
1853 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1854 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1855
1856 if (dport->port == PORT_A)
1857 ironlake_edp_pll_on(intel_dp);
1858}
1859
1860static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 1861{
2bd2ad64 1862 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1863 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 1864 struct drm_device *dev = encoder->base.dev;
89b667f8 1865 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 1866 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 1867 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9 1868 int pipe = intel_crtc->pipe;
bf13e81b 1869 struct edp_power_seq power_seq;
ab1f90f9 1870 u32 val;
a4fc5ed6 1871
ab1f90f9 1872 mutex_lock(&dev_priv->dpio_lock);
89b667f8 1873
ab3c759a 1874 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
1875 val = 0;
1876 if (pipe)
1877 val |= (1<<21);
1878 else
1879 val &= ~(1<<21);
1880 val |= 0x001000c4;
ab3c759a
CML
1881 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1882 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1883 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 1884
ab1f90f9
JN
1885 mutex_unlock(&dev_priv->dpio_lock);
1886
2cac613b
ID
1887 if (is_edp(intel_dp)) {
1888 /* init power sequencer on this pipe and port */
1889 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
1890 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
1891 &power_seq);
1892 }
bf13e81b 1893
ab1f90f9
JN
1894 intel_enable_dp(encoder);
1895
e4607fcf 1896 vlv_wait_port_ready(dev_priv, dport);
89b667f8
JB
1897}
1898
ecff4f3b 1899static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
1900{
1901 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1902 struct drm_device *dev = encoder->base.dev;
1903 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
1904 struct intel_crtc *intel_crtc =
1905 to_intel_crtc(encoder->base.crtc);
e4607fcf 1906 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 1907 int pipe = intel_crtc->pipe;
89b667f8 1908
89b667f8 1909 /* Program Tx lane resets to default */
0980a60f 1910 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 1911 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
1912 DPIO_PCS_TX_LANE2_RESET |
1913 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 1914 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
1915 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1916 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1917 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1918 DPIO_PCS_CLK_SOFT_RESET);
1919
1920 /* Fix up inter-pair skew failure */
ab3c759a
CML
1921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1922 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1923 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 1924 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
1925}
1926
1927/*
df0c237d
JB
1928 * Native read with retry for link status and receiver capability reads for
1929 * cases where the sink may still be asleep.
a4fc5ed6
KP
1930 */
1931static bool
df0c237d
JB
1932intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1933 uint8_t *recv, int recv_bytes)
a4fc5ed6 1934{
61da5fab
JB
1935 int ret, i;
1936
df0c237d
JB
1937 /*
1938 * Sinks are *supposed* to come up within 1ms from an off state,
1939 * but we're also supposed to retry 3 times per the spec.
1940 */
61da5fab 1941 for (i = 0; i < 3; i++) {
df0c237d
JB
1942 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1943 recv_bytes);
1944 if (ret == recv_bytes)
61da5fab
JB
1945 return true;
1946 msleep(1);
1947 }
a4fc5ed6 1948
61da5fab 1949 return false;
a4fc5ed6
KP
1950}
1951
1952/*
1953 * Fetch AUX CH registers 0x202 - 0x207 which contain
1954 * link status information
1955 */
1956static bool
93f62dad 1957intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 1958{
df0c237d
JB
1959 return intel_dp_aux_native_read_retry(intel_dp,
1960 DP_LANE0_1_STATUS,
93f62dad 1961 link_status,
df0c237d 1962 DP_LINK_STATUS_SIZE);
a4fc5ed6
KP
1963}
1964
a4fc5ed6
KP
1965/*
1967 * These are source-specific values; the maximum voltage swing and
1968 * pre-emphasis the source supports vary by platform, as encoded in the
1968 * two helpers below
1968 */
a4fc5ed6
KP
1969
1970static uint8_t
1a2eb460 1971intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 1972{
30add22d 1973 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1974 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1975
8f93f4f1 1976 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
e2fa6fba 1977 return DP_TRAIN_VOLTAGE_SWING_1200;
bc7d38a4 1978 else if (IS_GEN7(dev) && port == PORT_A)
1a2eb460 1979 return DP_TRAIN_VOLTAGE_SWING_800;
bc7d38a4 1980 else if (HAS_PCH_CPT(dev) && port != PORT_A)
1a2eb460
KP
1981 return DP_TRAIN_VOLTAGE_SWING_1200;
1982 else
1983 return DP_TRAIN_VOLTAGE_SWING_800;
1984}
1985
1986static uint8_t
1987intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1988{
30add22d 1989 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1990 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1991
8f93f4f1
PZ
1992 if (IS_BROADWELL(dev)) {
1993 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1994 case DP_TRAIN_VOLTAGE_SWING_400:
1995 case DP_TRAIN_VOLTAGE_SWING_600:
1996 return DP_TRAIN_PRE_EMPHASIS_6;
1997 case DP_TRAIN_VOLTAGE_SWING_800:
1998 return DP_TRAIN_PRE_EMPHASIS_3_5;
1999 case DP_TRAIN_VOLTAGE_SWING_1200:
2000 default:
2001 return DP_TRAIN_PRE_EMPHASIS_0;
2002 }
2003 } else if (IS_HASWELL(dev)) {
d6c0d722
PZ
2004 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2005 case DP_TRAIN_VOLTAGE_SWING_400:
2006 return DP_TRAIN_PRE_EMPHASIS_9_5;
2007 case DP_TRAIN_VOLTAGE_SWING_600:
2008 return DP_TRAIN_PRE_EMPHASIS_6;
2009 case DP_TRAIN_VOLTAGE_SWING_800:
2010 return DP_TRAIN_PRE_EMPHASIS_3_5;
2011 case DP_TRAIN_VOLTAGE_SWING_1200:
2012 default:
2013 return DP_TRAIN_PRE_EMPHASIS_0;
2014 }
e2fa6fba
P
2015 } else if (IS_VALLEYVIEW(dev)) {
2016 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2017 case DP_TRAIN_VOLTAGE_SWING_400:
2018 return DP_TRAIN_PRE_EMPHASIS_9_5;
2019 case DP_TRAIN_VOLTAGE_SWING_600:
2020 return DP_TRAIN_PRE_EMPHASIS_6;
2021 case DP_TRAIN_VOLTAGE_SWING_800:
2022 return DP_TRAIN_PRE_EMPHASIS_3_5;
2023 case DP_TRAIN_VOLTAGE_SWING_1200:
2024 default:
2025 return DP_TRAIN_PRE_EMPHASIS_0;
2026 }
bc7d38a4 2027 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
2028 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2029 case DP_TRAIN_VOLTAGE_SWING_400:
2030 return DP_TRAIN_PRE_EMPHASIS_6;
2031 case DP_TRAIN_VOLTAGE_SWING_600:
2032 case DP_TRAIN_VOLTAGE_SWING_800:
2033 return DP_TRAIN_PRE_EMPHASIS_3_5;
2034 default:
2035 return DP_TRAIN_PRE_EMPHASIS_0;
2036 }
2037 } else {
2038 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2039 case DP_TRAIN_VOLTAGE_SWING_400:
2040 return DP_TRAIN_PRE_EMPHASIS_6;
2041 case DP_TRAIN_VOLTAGE_SWING_600:
2042 return DP_TRAIN_PRE_EMPHASIS_6;
2043 case DP_TRAIN_VOLTAGE_SWING_800:
2044 return DP_TRAIN_PRE_EMPHASIS_3_5;
2045 case DP_TRAIN_VOLTAGE_SWING_1200:
2046 default:
2047 return DP_TRAIN_PRE_EMPHASIS_0;
2048 }
a4fc5ed6
KP
2049 }
2050}
2051
e2fa6fba
P
2052static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2053{
2054 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2055 struct drm_i915_private *dev_priv = dev->dev_private;
2056 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2057 struct intel_crtc *intel_crtc =
2058 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2059 unsigned long demph_reg_value, preemph_reg_value,
2060 uniqtranscale_reg_value;
2061 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2062 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2063 int pipe = intel_crtc->pipe;
e2fa6fba
P
2064
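 /*
 * Pick the DPIO de-emphasis, pre-emphasis and uniqtranscale values for
 * the requested swing/pre-emphasis combination; unsupported combinations
 * simply return 0 and leave the PHY untouched.
 */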
2065 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2066 case DP_TRAIN_PRE_EMPHASIS_0:
2067 preemph_reg_value = 0x0004000;
2068 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2069 case DP_TRAIN_VOLTAGE_SWING_400:
2070 demph_reg_value = 0x2B405555;
2071 uniqtranscale_reg_value = 0x552AB83A;
2072 break;
2073 case DP_TRAIN_VOLTAGE_SWING_600:
2074 demph_reg_value = 0x2B404040;
2075 uniqtranscale_reg_value = 0x5548B83A;
2076 break;
2077 case DP_TRAIN_VOLTAGE_SWING_800:
2078 demph_reg_value = 0x2B245555;
2079 uniqtranscale_reg_value = 0x5560B83A;
2080 break;
2081 case DP_TRAIN_VOLTAGE_SWING_1200:
2082 demph_reg_value = 0x2B405555;
2083 uniqtranscale_reg_value = 0x5598DA3A;
2084 break;
2085 default:
2086 return 0;
2087 }
2088 break;
2089 case DP_TRAIN_PRE_EMPHASIS_3_5:
2090 preemph_reg_value = 0x0002000;
2091 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2092 case DP_TRAIN_VOLTAGE_SWING_400:
2093 demph_reg_value = 0x2B404040;
2094 uniqtranscale_reg_value = 0x5552B83A;
2095 break;
2096 case DP_TRAIN_VOLTAGE_SWING_600:
2097 demph_reg_value = 0x2B404848;
2098 uniqtranscale_reg_value = 0x5580B83A;
2099 break;
2100 case DP_TRAIN_VOLTAGE_SWING_800:
2101 demph_reg_value = 0x2B404040;
2102 uniqtranscale_reg_value = 0x55ADDA3A;
2103 break;
2104 default:
2105 return 0;
2106 }
2107 break;
2108 case DP_TRAIN_PRE_EMPHASIS_6:
2109 preemph_reg_value = 0x0000000;
2110 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2111 case DP_TRAIN_VOLTAGE_SWING_400:
2112 demph_reg_value = 0x2B305555;
2113 uniqtranscale_reg_value = 0x5570B83A;
2114 break;
2115 case DP_TRAIN_VOLTAGE_SWING_600:
2116 demph_reg_value = 0x2B2B4040;
2117 uniqtranscale_reg_value = 0x55ADDA3A;
2118 break;
2119 default:
2120 return 0;
2121 }
2122 break;
2123 case DP_TRAIN_PRE_EMPHASIS_9_5:
2124 preemph_reg_value = 0x0006000;
2125 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2126 case DP_TRAIN_VOLTAGE_SWING_400:
2127 demph_reg_value = 0x1B405555;
2128 uniqtranscale_reg_value = 0x55ADDA3A;
2129 break;
2130 default:
2131 return 0;
2132 }
2133 break;
2134 default:
2135 return 0;
2136 }
2137
0980a60f 2138 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2139 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2140 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2141 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2142 uniqtranscale_reg_value);
ab3c759a
CML
2143 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2144 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2145 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2146 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 2147 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
2148
2149 return 0;
2150}
2151
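/*
 * Scan the per-lane adjustment requests in the link status and take the
 * highest voltage swing and pre-emphasis asked for, clamping each to the
 * source's maximum and setting the MAX_SWING/MAX_PRE_EMPHASIS flags when
 * the clamp kicks in, then apply the same value to all four lanes of
 * train_set.
 */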
a4fc5ed6 2152static void
0301b3ac
JN
2153intel_get_adjust_train(struct intel_dp *intel_dp,
2154 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
2155{
2156 uint8_t v = 0;
2157 uint8_t p = 0;
2158 int lane;
1a2eb460
KP
2159 uint8_t voltage_max;
2160 uint8_t preemph_max;
a4fc5ed6 2161
33a34e4e 2162 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
2163 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2164 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
2165
2166 if (this_v > v)
2167 v = this_v;
2168 if (this_p > p)
2169 p = this_p;
2170 }
2171
1a2eb460 2172 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
2173 if (v >= voltage_max)
2174 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 2175
1a2eb460
KP
2176 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2177 if (p >= preemph_max)
2178 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
2179
2180 for (lane = 0; lane < 4; lane++)
33a34e4e 2181 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
2182}
2183
2184static uint32_t
f0a3424e 2185intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 2186{
3cf2efb1 2187 uint32_t signal_levels = 0;
a4fc5ed6 2188
3cf2efb1 2189 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
a4fc5ed6
KP
2190 case DP_TRAIN_VOLTAGE_SWING_400:
2191 default:
2192 signal_levels |= DP_VOLTAGE_0_4;
2193 break;
2194 case DP_TRAIN_VOLTAGE_SWING_600:
2195 signal_levels |= DP_VOLTAGE_0_6;
2196 break;
2197 case DP_TRAIN_VOLTAGE_SWING_800:
2198 signal_levels |= DP_VOLTAGE_0_8;
2199 break;
2200 case DP_TRAIN_VOLTAGE_SWING_1200:
2201 signal_levels |= DP_VOLTAGE_1_2;
2202 break;
2203 }
3cf2efb1 2204 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
a4fc5ed6
KP
2205 case DP_TRAIN_PRE_EMPHASIS_0:
2206 default:
2207 signal_levels |= DP_PRE_EMPHASIS_0;
2208 break;
2209 case DP_TRAIN_PRE_EMPHASIS_3_5:
2210 signal_levels |= DP_PRE_EMPHASIS_3_5;
2211 break;
2212 case DP_TRAIN_PRE_EMPHASIS_6:
2213 signal_levels |= DP_PRE_EMPHASIS_6;
2214 break;
2215 case DP_TRAIN_PRE_EMPHASIS_9_5:
2216 signal_levels |= DP_PRE_EMPHASIS_9_5;
2217 break;
2218 }
2219 return signal_levels;
2220}
2221
e3421a18
ZW
2222/* Gen6's DP voltage swing and pre-emphasis control */
2223static uint32_t
2224intel_gen6_edp_signal_levels(uint8_t train_set)
2225{
3c5a62b5
YL
2226 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2227 DP_TRAIN_PRE_EMPHASIS_MASK);
2228 switch (signal_levels) {
e3421a18 2229 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2230 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2231 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2232 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2233 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
e3421a18 2234 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
3c5a62b5
YL
2235 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2236 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
e3421a18 2237 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
3c5a62b5
YL
2238 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2239 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
e3421a18 2240 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2241 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2242 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 2243 default:
3c5a62b5
YL
2244 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2245 "0x%x\n", signal_levels);
2246 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
2247 }
2248}
2249
1a2eb460
KP
2250/* Gen7's DP voltage swing and pre-emphasis control */
2251static uint32_t
2252intel_gen7_edp_signal_levels(uint8_t train_set)
2253{
2254 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2255 DP_TRAIN_PRE_EMPHASIS_MASK);
2256 switch (signal_levels) {
2257 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2258 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2259 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2260 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2261 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2262 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2263
2264 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2265 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2266 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2267 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2268
2269 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2270 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2271 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2272 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2273
2274 default:
2275 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2276 "0x%x\n", signal_levels);
2277 return EDP_LINK_TRAIN_500MV_0DB_IVB;
2278 }
2279}
2280
d6c0d722
PZ
2281/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2282static uint32_t
f0a3424e 2283intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 2284{
d6c0d722
PZ
2285 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2286 DP_TRAIN_PRE_EMPHASIS_MASK);
2287 switch (signal_levels) {
2288 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2289 return DDI_BUF_EMP_400MV_0DB_HSW;
2290 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2291 return DDI_BUF_EMP_400MV_3_5DB_HSW;
2292 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2293 return DDI_BUF_EMP_400MV_6DB_HSW;
2294 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2295 return DDI_BUF_EMP_400MV_9_5DB_HSW;
a4fc5ed6 2296
d6c0d722
PZ
2297 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2298 return DDI_BUF_EMP_600MV_0DB_HSW;
2299 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2300 return DDI_BUF_EMP_600MV_3_5DB_HSW;
2301 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2302 return DDI_BUF_EMP_600MV_6DB_HSW;
a4fc5ed6 2303
d6c0d722
PZ
2304 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2305 return DDI_BUF_EMP_800MV_0DB_HSW;
2306 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2307 return DDI_BUF_EMP_800MV_3_5DB_HSW;
2308 default:
2309 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2310 "0x%x\n", signal_levels);
2311 return DDI_BUF_EMP_400MV_0DB_HSW;
a4fc5ed6 2312 }
a4fc5ed6
KP
2313}
2314
8f93f4f1
PZ
2315static uint32_t
2316intel_bdw_signal_levels(uint8_t train_set)
2317{
2318 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2319 DP_TRAIN_PRE_EMPHASIS_MASK);
2320 switch (signal_levels) {
2321 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2322 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2323 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2324 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
2325 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2326 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
2327
2328 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2329 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
2330 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2331 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
2332 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2333 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
2334
2335 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2336 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
2337 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2338 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
2339
2340 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2341 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
2342
2343 default:
2344 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2345 "0x%x\n", signal_levels);
2346 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
2347 }
2348}
2349
f0a3424e
PZ
2350/* Properly updates "DP" with the correct signal levels. */
2351static void
2352intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2353{
2354 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2355 enum port port = intel_dig_port->port;
f0a3424e
PZ
2356 struct drm_device *dev = intel_dig_port->base.base.dev;
2357 uint32_t signal_levels, mask;
2358 uint8_t train_set = intel_dp->train_set[0];
2359
8f93f4f1
PZ
2360 if (IS_BROADWELL(dev)) {
2361 signal_levels = intel_bdw_signal_levels(train_set);
2362 mask = DDI_BUF_EMP_MASK;
2363 } else if (IS_HASWELL(dev)) {
f0a3424e
PZ
2364 signal_levels = intel_hsw_signal_levels(train_set);
2365 mask = DDI_BUF_EMP_MASK;
e2fa6fba
P
2366 } else if (IS_VALLEYVIEW(dev)) {
2367 signal_levels = intel_vlv_signal_levels(intel_dp);
2368 mask = 0;
bc7d38a4 2369 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
2370 signal_levels = intel_gen7_edp_signal_levels(train_set);
2371 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 2372 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
2373 signal_levels = intel_gen6_edp_signal_levels(train_set);
2374 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2375 } else {
2376 signal_levels = intel_gen4_signal_levels(train_set);
2377 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2378 }
2379
2380 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2381
2382 *DP = (*DP & ~mask) | signal_levels;
2383}
2384
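/*
 * Program the requested training pattern on the source (DP_TP_CTL on DDI
 * platforms, the link-train bits of the port register elsewhere) and then
 * mirror it to the sink via DP_TRAINING_PATTERN_SET, sending the per-lane
 * drive settings in the same AUX write unless training is being disabled.
 */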
a4fc5ed6 2385static bool
ea5b213a 2386intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 2387 uint32_t *DP,
58e10eb9 2388 uint8_t dp_train_pat)
a4fc5ed6 2389{
174edf1f
PZ
2390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2391 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2392 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 2393 enum port port = intel_dig_port->port;
2cdfe6c8
JN
2394 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2395 int ret, len;
a4fc5ed6 2396
22b8bf17 2397 if (HAS_DDI(dev)) {
3ab9c637 2398 uint32_t temp = I915_READ(DP_TP_CTL(port));
d6c0d722
PZ
2399
2400 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2401 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2402 else
2403 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2404
2405 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2406 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2407 case DP_TRAINING_PATTERN_DISABLE:
d6c0d722
PZ
2408 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2409
2410 break;
2411 case DP_TRAINING_PATTERN_1:
2412 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2413 break;
2414 case DP_TRAINING_PATTERN_2:
2415 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2416 break;
2417 case DP_TRAINING_PATTERN_3:
2418 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2419 break;
2420 }
174edf1f 2421 I915_WRITE(DP_TP_CTL(port), temp);
d6c0d722 2422
bc7d38a4 2423 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
70aff66c 2424 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
47ea7542
PZ
2425
2426 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2427 case DP_TRAINING_PATTERN_DISABLE:
70aff66c 2428 *DP |= DP_LINK_TRAIN_OFF_CPT;
47ea7542
PZ
2429 break;
2430 case DP_TRAINING_PATTERN_1:
70aff66c 2431 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
47ea7542
PZ
2432 break;
2433 case DP_TRAINING_PATTERN_2:
70aff66c 2434 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
47ea7542
PZ
2435 break;
2436 case DP_TRAINING_PATTERN_3:
2437 DRM_ERROR("DP training pattern 3 not supported\n");
70aff66c 2438 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
47ea7542
PZ
2439 break;
2440 }
2441
2442 } else {
70aff66c 2443 *DP &= ~DP_LINK_TRAIN_MASK;
47ea7542
PZ
2444
2445 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2446 case DP_TRAINING_PATTERN_DISABLE:
70aff66c 2447 *DP |= DP_LINK_TRAIN_OFF;
47ea7542
PZ
2448 break;
2449 case DP_TRAINING_PATTERN_1:
70aff66c 2450 *DP |= DP_LINK_TRAIN_PAT_1;
47ea7542
PZ
2451 break;
2452 case DP_TRAINING_PATTERN_2:
70aff66c 2453 *DP |= DP_LINK_TRAIN_PAT_2;
47ea7542
PZ
2454 break;
2455 case DP_TRAINING_PATTERN_3:
2456 DRM_ERROR("DP training pattern 3 not supported\n");
70aff66c 2457 *DP |= DP_LINK_TRAIN_PAT_2;
47ea7542
PZ
2458 break;
2459 }
2460 }
2461
70aff66c 2462 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 2463 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 2464
2cdfe6c8
JN
2465 buf[0] = dp_train_pat;
2466 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 2467 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
2468 /* don't write DP_TRAINING_LANEx_SET on disable */
2469 len = 1;
2470 } else {
2471 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2472 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2473 len = intel_dp->lane_count + 1;
47ea7542 2474 }
a4fc5ed6 2475
2cdfe6c8
JN
2476 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
2477 buf, len);
2478
2479 return ret == len;
a4fc5ed6
KP
2480}
2481
70aff66c
JN
2482static bool
2483intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2484 uint8_t dp_train_pat)
2485{
953d22e8 2486 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
2487 intel_dp_set_signal_levels(intel_dp, DP);
2488 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2489}
2490
2491static bool
2492intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 2493 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
2494{
2495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2496 struct drm_device *dev = intel_dig_port->base.base.dev;
2497 struct drm_i915_private *dev_priv = dev->dev_private;
2498 int ret;
2499
2500 intel_get_adjust_train(intel_dp, link_status);
2501 intel_dp_set_signal_levels(intel_dp, DP);
2502
2503 I915_WRITE(intel_dp->output_reg, *DP);
2504 POSTING_READ(intel_dp->output_reg);
2505
2506 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2507 intel_dp->train_set,
2508 intel_dp->lane_count);
2509
2510 return ret == intel_dp->lane_count;
2511}
2512
3ab9c637
ID
2513static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2514{
2515 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2516 struct drm_device *dev = intel_dig_port->base.base.dev;
2517 struct drm_i915_private *dev_priv = dev->dev_private;
2518 enum port port = intel_dig_port->port;
2519 uint32_t val;
2520
2521 if (!HAS_DDI(dev))
2522 return;
2523
2524 val = I915_READ(DP_TP_CTL(port));
2525 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2526 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
2527 I915_WRITE(DP_TP_CTL(port), val);
2528
2529 /*
 2530 * On PORT_A we can only have eDP in SST mode. There, the only reason
 2531 * we need to set idle transmission mode is to work around a HW issue
 2532 * where we enable the pipe while not in idle link-training mode.
 2533 * In this case there is a requirement to wait for a minimum number of
 2534 * idle patterns to be sent.
2535 */
2536 if (port == PORT_A)
2537 return;
2538
2539 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2540 1))
2541 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2542}
2543
33a34e4e 2544/* Enable corresponding port and start training pattern 1 */
c19b0669 2545void
33a34e4e 2546intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 2547{
da63a9f2 2548 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 2549 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
2550 int i;
2551 uint8_t voltage;
cdb0e95b 2552 int voltage_tries, loop_tries;
ea5b213a 2553 uint32_t DP = intel_dp->DP;
6aba5b6c 2554 uint8_t link_config[2];
a4fc5ed6 2555
affa9354 2556 if (HAS_DDI(dev))
c19b0669
PZ
2557 intel_ddi_prepare_link_retrain(encoder);
2558
3cf2efb1 2559 /* Write the link configuration data */
6aba5b6c
JN
2560 link_config[0] = intel_dp->link_bw;
2561 link_config[1] = intel_dp->lane_count;
2562 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2563 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
2564 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
2565
2566 link_config[0] = 0;
2567 link_config[1] = DP_SET_ANSI_8B10B;
2568 intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
2569
2570 DP |= DP_PORT_EN;
1a2eb460 2571
70aff66c
JN
2572 /* clock recovery */
2573 if (!intel_dp_reset_link_train(intel_dp, &DP,
2574 DP_TRAINING_PATTERN_1 |
2575 DP_LINK_SCRAMBLING_DISABLE)) {
2576 DRM_ERROR("failed to enable link training\n");
2577 return;
2578 }
2579
a4fc5ed6 2580 voltage = 0xff;
cdb0e95b
KP
2581 voltage_tries = 0;
2582 loop_tries = 0;
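 /*
 * Clock recovery loop: after each spec-mandated delay, read the link
 * status and stop once all lanes report clock recovery. If every lane is
 * already at max swing, restart from scratch (up to 5 full restarts); if
 * the same voltage has been tried 5 times in a row, give up. Otherwise
 * adjust the drive settings as the sink requests and try again.
 */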
a4fc5ed6 2583 for (;;) {
70aff66c 2584 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 2585
a7c9655f 2586 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
2587 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2588 DRM_ERROR("failed to get link status\n");
a4fc5ed6 2589 break;
93f62dad 2590 }
a4fc5ed6 2591
01916270 2592 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 2593 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
2594 break;
2595 }
2596
2597 /* Check to see if we've tried the max voltage */
2598 for (i = 0; i < intel_dp->lane_count; i++)
2599 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 2600 break;
3b4f819d 2601 if (i == intel_dp->lane_count) {
b06fbda3
DV
2602 ++loop_tries;
2603 if (loop_tries == 5) {
3def84b3 2604 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
2605 break;
2606 }
70aff66c
JN
2607 intel_dp_reset_link_train(intel_dp, &DP,
2608 DP_TRAINING_PATTERN_1 |
2609 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
2610 voltage_tries = 0;
2611 continue;
2612 }
a4fc5ed6 2613
3cf2efb1 2614 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 2615 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 2616 ++voltage_tries;
b06fbda3 2617 if (voltage_tries == 5) {
3def84b3 2618 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
2619 break;
2620 }
2621 } else
2622 voltage_tries = 0;
2623 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 2624
70aff66c
JN
2625 /* Update training set as requested by target */
2626 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2627 DRM_ERROR("failed to update link training\n");
2628 break;
2629 }
a4fc5ed6
KP
2630 }
2631
33a34e4e
JB
2632 intel_dp->DP = DP;
2633}
2634
c19b0669 2635void
33a34e4e
JB
2636intel_dp_complete_link_train(struct intel_dp *intel_dp)
2637{
33a34e4e 2638 bool channel_eq = false;
37f80975 2639 int tries, cr_tries;
33a34e4e
JB
2640 uint32_t DP = intel_dp->DP;
2641
a4fc5ed6 2642 /* channel equalization */
70aff66c
JN
2643 if (!intel_dp_set_link_train(intel_dp, &DP,
2644 DP_TRAINING_PATTERN_2 |
2645 DP_LINK_SCRAMBLING_DISABLE)) {
2646 DRM_ERROR("failed to start channel equalization\n");
2647 return;
2648 }
2649
a4fc5ed6 2650 tries = 0;
37f80975 2651 cr_tries = 0;
a4fc5ed6
KP
2652 channel_eq = false;
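 /*
 * Channel equalization loop: bail out entirely after 5 clock-recovery
 * fallbacks (cr_tries), redo clock recovery if it was lost, and after 5
 * unsuccessful EQ attempts drop the link and start training over.
 * Otherwise keep applying the drive settings the sink asks for until
 * drm_dp_channel_eq_ok() reports all lanes equalized.
 */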
2653 for (;;) {
70aff66c 2654 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 2655
37f80975
JB
2656 if (cr_tries > 5) {
2657 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
2658 break;
2659 }
2660
a7c9655f 2661 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
2662 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2663 DRM_ERROR("failed to get link status\n");
a4fc5ed6 2664 break;
70aff66c 2665 }
a4fc5ed6 2666
37f80975 2667 /* Make sure clock is still ok */
01916270 2668 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 2669 intel_dp_start_link_train(intel_dp);
70aff66c
JN
2670 intel_dp_set_link_train(intel_dp, &DP,
2671 DP_TRAINING_PATTERN_2 |
2672 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
2673 cr_tries++;
2674 continue;
2675 }
2676
1ffdff13 2677 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
2678 channel_eq = true;
2679 break;
2680 }
a4fc5ed6 2681
37f80975
JB
2682 /* Try 5 times, then try clock recovery if that fails */
2683 if (tries > 5) {
2684 intel_dp_link_down(intel_dp);
2685 intel_dp_start_link_train(intel_dp);
70aff66c
JN
2686 intel_dp_set_link_train(intel_dp, &DP,
2687 DP_TRAINING_PATTERN_2 |
2688 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
2689 tries = 0;
2690 cr_tries++;
2691 continue;
2692 }
a4fc5ed6 2693
70aff66c
JN
2694 /* Update training set as requested by target */
2695 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2696 DRM_ERROR("failed to update link training\n");
2697 break;
2698 }
3cf2efb1 2699 ++tries;
869184a6 2700 }
3cf2efb1 2701
3ab9c637
ID
2702 intel_dp_set_idle_link_train(intel_dp);
2703
2704 intel_dp->DP = DP;
2705
d6c0d722 2706 if (channel_eq)
07f42258 2707 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 2708
3ab9c637
ID
2709}
2710
2711void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2712{
70aff66c 2713 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 2714 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
2715}
2716
2717static void
ea5b213a 2718intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 2719{
da63a9f2 2720 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2721 enum port port = intel_dig_port->port;
da63a9f2 2722 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2723 struct drm_i915_private *dev_priv = dev->dev_private;
ab527efc
DV
2724 struct intel_crtc *intel_crtc =
2725 to_intel_crtc(intel_dig_port->base.base.crtc);
ea5b213a 2726 uint32_t DP = intel_dp->DP;
a4fc5ed6 2727
c19b0669
PZ
2728 /*
2729 * DDI code has a strict mode set sequence and we should try to respect
2730 * it, otherwise we might hang the machine in many different ways. So we
2731 * really should be disabling the port only on a complete crtc_disable
 2732 * sequence. This function is only called under two conditions in the DDI
 2733 * code:
 2734 * - Link train failed while doing crtc_enable, and in this case we
 2735 * really should respect the mode set sequence and wait for a
 2736 * crtc_disable.
 2737 * - Someone turned the monitor off and intel_dp_check_link_status
 2738 * called us. We don't need to disable the whole port in this case, so
2739 * when someone turns the monitor on again,
2740 * intel_ddi_prepare_link_retrain will take care of redoing the link
2741 * train.
2742 */
affa9354 2743 if (HAS_DDI(dev))
c19b0669
PZ
2744 return;
2745
0c33d8d7 2746 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
2747 return;
2748
28c97730 2749 DRM_DEBUG_KMS("\n");
32f9d658 2750
bc7d38a4 2751 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 2752 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 2753 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18
ZW
2754 } else {
2755 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 2756 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 2757 }
fe255d00 2758 POSTING_READ(intel_dp->output_reg);
5eb08b69 2759
ab527efc
DV
2760 /* We don't really know why we're doing this */
2761 intel_wait_for_vblank(dev, intel_crtc->pipe);
5eb08b69 2762
493a7081 2763 if (HAS_PCH_IBX(dev) &&
1b39d6f3 2764 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
da63a9f2 2765 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
31acbcc4 2766
5bddd17f
EA
2767 /* Hardware workaround: leaving our transcoder select
2768 * set to transcoder B while it's off will prevent the
2769 * corresponding HDMI output on transcoder A.
2770 *
2771 * Combine this with another hardware workaround:
2772 * transcoder select bit can only be cleared while the
2773 * port is enabled.
2774 */
2775 DP &= ~DP_PIPEB_SELECT;
2776 I915_WRITE(intel_dp->output_reg, DP);
2777
2778 /* Changes to enable or select take place the vblank
2779 * after being written.
2780 */
ff50afe9
DV
2781 if (WARN_ON(crtc == NULL)) {
2782 /* We should never try to disable a port without a crtc
2783 * attached. For paranoia keep the code around for a
2784 * bit. */
31acbcc4
CW
2785 POSTING_READ(intel_dp->output_reg);
2786 msleep(50);
2787 } else
ab527efc 2788 intel_wait_for_vblank(dev, intel_crtc->pipe);
5bddd17f
EA
2789 }
2790
832afda6 2791 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
2792 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2793 POSTING_READ(intel_dp->output_reg);
f01eca2e 2794 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
2795}
2796
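/*
 * Cache the sink's DPCD. Returns false if the AUX read fails or the
 * receiver reports DPCD revision 0; on eDP it also snapshots the PSR
 * capability bytes, and for branch devices newer than DPCD 1.0 it pulls
 * in the per-port downstream info as well.
 */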
26d61aad
KP
2797static bool
2798intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 2799{
a031d709
RV
2800 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2801 struct drm_device *dev = dig_port->base.base.dev;
2802 struct drm_i915_private *dev_priv = dev->dev_private;
2803
577c7a50
DL
2804 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2805
92fd8fd1 2806 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
edb39244
AJ
2807 sizeof(intel_dp->dpcd)) == 0)
2808 return false; /* aux transfer failed */
92fd8fd1 2809
577c7a50
DL
2810 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2811 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2812 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2813
edb39244
AJ
2814 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2815 return false; /* DPCD not present */
2816
2293bb5c
SK
2817 /* Check if the panel supports PSR */
2818 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939
JN
2819 if (is_edp(intel_dp)) {
2820 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2821 intel_dp->psr_dpcd,
2822 sizeof(intel_dp->psr_dpcd));
a031d709
RV
2823 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2824 dev_priv->psr.sink_support = true;
50003939 2825 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 2826 }
50003939
JN
2827 }
2828
edb39244
AJ
2829 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2830 DP_DWN_STRM_PORT_PRESENT))
2831 return true; /* native DP sink */
2832
2833 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2834 return true; /* no per-port downstream info */
2835
2836 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2837 intel_dp->downstream_ports,
2838 DP_MAX_DOWNSTREAM_PORTS) == 0)
2839 return false; /* downstream port status fetch failed */
2840
2841 return true;
92fd8fd1
KP
2842}
2843
0d198328
AJ
2844static void
2845intel_dp_probe_oui(struct intel_dp *intel_dp)
2846{
2847 u8 buf[3];
2848
2849 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2850 return;
2851
351cfc34
DV
2852 ironlake_edp_panel_vdd_on(intel_dp);
2853
0d198328
AJ
2854 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2855 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2856 buf[0], buf[1], buf[2]);
2857
2858 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2859 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2860 buf[0], buf[1], buf[2]);
351cfc34
DV
2861
2862 ironlake_edp_panel_vdd_off(intel_dp, false);
0d198328
AJ
2863}
2864
a60f0e38
JB
2865static bool
2866intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2867{
2868 int ret;
2869
2870 ret = intel_dp_aux_native_read_retry(intel_dp,
2871 DP_DEVICE_SERVICE_IRQ_VECTOR,
2872 sink_irq_vector, 1);
2873 if (!ret)
2874 return false;
2875
2876 return true;
2877}
2878
2879static void
2880intel_dp_handle_test_request(struct intel_dp *intel_dp)
2881{
2882 /* NAK by default */
9324cf7f 2883 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
2884}
2885
a4fc5ed6
KP
2886/*
2887 * According to DP spec
2888 * 5.1.2:
2889 * 1. Read DPCD
2890 * 2. Configure link according to Receiver Capabilities
2891 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2892 * 4. Check link status on receipt of hot-plug interrupt
2893 */
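/*
 * In this file, intel_dp_get_dpcd() covers step 1, intel_dp_start_link_train()
 * writes the link configuration and, together with
 * intel_dp_complete_link_train(), performs step 3, and
 * intel_dp_check_link_status() below handles step 4.
 */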
2894
00c09d70 2895void
ea5b213a 2896intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 2897{
da63a9f2 2898 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 2899 u8 sink_irq_vector;
93f62dad 2900 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 2901
da63a9f2 2902 if (!intel_encoder->connectors_active)
d2b996ac 2903 return;
59cd09e1 2904
da63a9f2 2905 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
2906 return;
2907
92fd8fd1 2908 /* Try to read receiver status if the link appears to be up */
93f62dad 2909 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
2910 return;
2911 }
2912
92fd8fd1 2913 /* Now read the DPCD to see if it's actually running */
26d61aad 2914 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
2915 return;
2916 }
2917
a60f0e38
JB
2918 /* Try to read the source of the interrupt */
2919 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2920 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2921 /* Clear interrupt source */
2922 intel_dp_aux_native_write_1(intel_dp,
2923 DP_DEVICE_SERVICE_IRQ_VECTOR,
2924 sink_irq_vector);
2925
2926 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2927 intel_dp_handle_test_request(intel_dp);
2928 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2929 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2930 }
2931
1ffdff13 2932 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 2933 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
da63a9f2 2934 drm_get_encoder_name(&intel_encoder->base));
33a34e4e
JB
2935 intel_dp_start_link_train(intel_dp);
2936 intel_dp_complete_link_train(intel_dp);
3ab9c637 2937 intel_dp_stop_link_train(intel_dp);
33a34e4e 2938 }
a4fc5ed6 2939}
a4fc5ed6 2940
caf9ab24 2941/* XXX this is probably wrong for multiple downstream ports */
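/*
 * Decide connection state from the DPCD alone: no downstream port means a
 * native sink and we are done; an HPD-capable branch device is trusted to
 * report SINK_COUNT; otherwise we gently probe the DDC lines and, failing
 * that, return "unknown" for VGA/legacy downstream types rather than
 * guessing.
 */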
71ba9000 2942static enum drm_connector_status
26d61aad 2943intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 2944{
caf9ab24 2945 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
2946 uint8_t type;
2947
2948 if (!intel_dp_get_dpcd(intel_dp))
2949 return connector_status_disconnected;
2950
2951 /* if there's no downstream port, we're done */
2952 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 2953 return connector_status_connected;
caf9ab24
AJ
2954
2955 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
2956 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2957 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 2958 uint8_t reg;
caf9ab24 2959 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
23235177 2960 &reg, 1))
caf9ab24 2961 return connector_status_unknown;
23235177
AJ
2962 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2963 : connector_status_disconnected;
caf9ab24
AJ
2964 }
2965
2966 /* If no HPD, poke DDC gently */
2967 if (drm_probe_ddc(&intel_dp->adapter))
26d61aad 2968 return connector_status_connected;
caf9ab24
AJ
2969
2970 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
2971 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
2972 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2973 if (type == DP_DS_PORT_TYPE_VGA ||
2974 type == DP_DS_PORT_TYPE_NON_EDID)
2975 return connector_status_unknown;
2976 } else {
2977 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2978 DP_DWN_STRM_PORT_TYPE_MASK;
2979 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
2980 type == DP_DWN_STRM_PORT_TYPE_OTHER)
2981 return connector_status_unknown;
2982 }
caf9ab24
AJ
2983
2984 /* Anything else is out of spec, warn and ignore */
2985 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 2986 return connector_status_disconnected;
71ba9000
AJ
2987}
2988
5eb08b69 2989static enum drm_connector_status
a9756bb5 2990ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 2991{
30add22d 2992 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
2993 struct drm_i915_private *dev_priv = dev->dev_private;
2994 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5eb08b69
ZW
2995 enum drm_connector_status status;
2996
fe16d949
CW
2997 /* Can't disconnect eDP, but you can close the lid... */
2998 if (is_edp(intel_dp)) {
30add22d 2999 status = intel_panel_detect(dev);
fe16d949
CW
3000 if (status == connector_status_unknown)
3001 status = connector_status_connected;
3002 return status;
3003 }
01cb9ea6 3004
1b469639
DL
3005 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3006 return connector_status_disconnected;
3007
26d61aad 3008 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
3009}
3010
a4fc5ed6 3011static enum drm_connector_status
a9756bb5 3012g4x_dp_detect(struct intel_dp *intel_dp)
a4fc5ed6 3013{
30add22d 3014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 3015 struct drm_i915_private *dev_priv = dev->dev_private;
34f2be46 3016 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
10f76a38 3017 uint32_t bit;
5eb08b69 3018
35aad75f
JB
3019 /* Can't disconnect eDP, but you can close the lid... */
3020 if (is_edp(intel_dp)) {
3021 enum drm_connector_status status;
3022
3023 status = intel_panel_detect(dev);
3024 if (status == connector_status_unknown)
3025 status = connector_status_connected;
3026 return status;
3027 }
3028
232a6ee9
TP
3029 if (IS_VALLEYVIEW(dev)) {
3030 switch (intel_dig_port->port) {
3031 case PORT_B:
3032 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3033 break;
3034 case PORT_C:
3035 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3036 break;
3037 case PORT_D:
3038 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3039 break;
3040 default:
3041 return connector_status_unknown;
3042 }
3043 } else {
3044 switch (intel_dig_port->port) {
3045 case PORT_B:
3046 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3047 break;
3048 case PORT_C:
3049 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3050 break;
3051 case PORT_D:
3052 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3053 break;
3054 default:
3055 return connector_status_unknown;
3056 }
a4fc5ed6
KP
3057 }
3058
10f76a38 3059 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
a4fc5ed6
KP
3060 return connector_status_disconnected;
3061
26d61aad 3062 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
3063}
3064
8c241fef
KP
3065static struct edid *
3066intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3067{
9cd300e0 3068 struct intel_connector *intel_connector = to_intel_connector(connector);
d6f24d0f 3069
9cd300e0
JN
3070 /* use cached edid if we have one */
3071 if (intel_connector->edid) {
9cd300e0
JN
3072 /* invalid edid */
3073 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
3074 return NULL;
3075
55e9edeb 3076 return drm_edid_duplicate(intel_connector->edid);
d6f24d0f 3077 }
8c241fef 3078
9cd300e0 3079 return drm_get_edid(connector, adapter);
8c241fef
KP
3080}
3081
3082static int
3083intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3084{
9cd300e0 3085 struct intel_connector *intel_connector = to_intel_connector(connector);
8c241fef 3086
9cd300e0
JN
3087 /* use cached edid if we have one */
3088 if (intel_connector->edid) {
3089 /* invalid edid */
3090 if (IS_ERR(intel_connector->edid))
3091 return 0;
3092
3093 return intel_connector_update_modes(connector,
3094 intel_connector->edid);
d6f24d0f
JB
3095 }
3096
9cd300e0 3097 return intel_ddc_get_modes(connector, adapter);
8c241fef
KP
3098}
3099
a9756bb5
ZW
3100static enum drm_connector_status
3101intel_dp_detect(struct drm_connector *connector, bool force)
3102{
3103 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
3104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3105 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 3106 struct drm_device *dev = connector->dev;
c8c8fb33 3107 struct drm_i915_private *dev_priv = dev->dev_private;
a9756bb5
ZW
3108 enum drm_connector_status status;
3109 struct edid *edid = NULL;
3110
c8c8fb33
PZ
3111 intel_runtime_pm_get(dev_priv);
3112
164c8598
CW
3113 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3114 connector->base.id, drm_get_connector_name(connector));
3115
a9756bb5
ZW
3116 intel_dp->has_audio = false;
3117
3118 if (HAS_PCH_SPLIT(dev))
3119 status = ironlake_dp_detect(intel_dp);
3120 else
3121 status = g4x_dp_detect(intel_dp);
1b9be9d0 3122
a9756bb5 3123 if (status != connector_status_connected)
c8c8fb33 3124 goto out;
a9756bb5 3125
0d198328
AJ
3126 intel_dp_probe_oui(intel_dp);
3127
c3e5f67b
DV
3128 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3129 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
f684960e 3130 } else {
8c241fef 3131 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
f684960e
CW
3132 if (edid) {
3133 intel_dp->has_audio = drm_detect_monitor_audio(edid);
f684960e
CW
3134 kfree(edid);
3135 }
a9756bb5
ZW
3136 }
3137
d63885da
PZ
3138 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3139 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
3140 status = connector_status_connected;
3141
3142out:
3143 intel_runtime_pm_put(dev_priv);
3144 return status;
a4fc5ed6
KP
3145}
3146
3147static int intel_dp_get_modes(struct drm_connector *connector)
3148{
df0e9248 3149 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e 3150 struct intel_connector *intel_connector = to_intel_connector(connector);
fa90ecef 3151 struct drm_device *dev = connector->dev;
32f9d658 3152 int ret;
a4fc5ed6
KP
3153
3154 /* We should parse the EDID data and find out if it has an audio sink
3155 */
3156
8c241fef 3157 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
f8779fda 3158 if (ret)
32f9d658
ZW
3159 return ret;
3160
f8779fda 3161 /* if eDP has no EDID, fall back to fixed mode */
dd06f90e 3162 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
f8779fda 3163 struct drm_display_mode *mode;
dd06f90e
JN
3164 mode = drm_mode_duplicate(dev,
3165 intel_connector->panel.fixed_mode);
f8779fda 3166 if (mode) {
32f9d658
ZW
3167 drm_mode_probed_add(connector, mode);
3168 return 1;
3169 }
3170 }
3171 return 0;
a4fc5ed6
KP
3172}
3173
1aad7ac0
CW
3174static bool
3175intel_dp_detect_audio(struct drm_connector *connector)
3176{
3177 struct intel_dp *intel_dp = intel_attached_dp(connector);
3178 struct edid *edid;
3179 bool has_audio = false;
3180
8c241fef 3181 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1aad7ac0
CW
3182 if (edid) {
3183 has_audio = drm_detect_monitor_audio(edid);
1aad7ac0
CW
3184 kfree(edid);
3185 }
3186
3187 return has_audio;
3188}
3189
f684960e
CW
3190static int
3191intel_dp_set_property(struct drm_connector *connector,
3192 struct drm_property *property,
3193 uint64_t val)
3194{
e953fd7b 3195 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 3196 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
3197 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
3198 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
3199 int ret;
3200
662595df 3201 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
3202 if (ret)
3203 return ret;
3204
3f43c48d 3205 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
3206 int i = val;
3207 bool has_audio;
3208
3209 if (i == intel_dp->force_audio)
f684960e
CW
3210 return 0;
3211
1aad7ac0 3212 intel_dp->force_audio = i;
f684960e 3213
c3e5f67b 3214 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
3215 has_audio = intel_dp_detect_audio(connector);
3216 else
c3e5f67b 3217 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
3218
3219 if (has_audio == intel_dp->has_audio)
f684960e
CW
3220 return 0;
3221
1aad7ac0 3222 intel_dp->has_audio = has_audio;
f684960e
CW
3223 goto done;
3224 }
3225
e953fd7b 3226 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
3227 bool old_auto = intel_dp->color_range_auto;
3228 uint32_t old_range = intel_dp->color_range;
3229
55bc60db
VS
3230 switch (val) {
3231 case INTEL_BROADCAST_RGB_AUTO:
3232 intel_dp->color_range_auto = true;
3233 break;
3234 case INTEL_BROADCAST_RGB_FULL:
3235 intel_dp->color_range_auto = false;
3236 intel_dp->color_range = 0;
3237 break;
3238 case INTEL_BROADCAST_RGB_LIMITED:
3239 intel_dp->color_range_auto = false;
3240 intel_dp->color_range = DP_COLOR_RANGE_16_235;
3241 break;
3242 default:
3243 return -EINVAL;
3244 }
ae4edb80
DV
3245
3246 if (old_auto == intel_dp->color_range_auto &&
3247 old_range == intel_dp->color_range)
3248 return 0;
3249
e953fd7b
CW
3250 goto done;
3251 }
3252
53b41837
YN
3253 if (is_edp(intel_dp) &&
3254 property == connector->dev->mode_config.scaling_mode_property) {
3255 if (val == DRM_MODE_SCALE_NONE) {
3256 DRM_DEBUG_KMS("DRM_MODE_SCALE_NONE is not supported on eDP\n");
3257 return -EINVAL;
3258 }
3259
3260 if (intel_connector->panel.fitting_mode == val) {
3261 /* the eDP scaling property is not changed */
3262 return 0;
3263 }
3264 intel_connector->panel.fitting_mode = val;
3265
3266 goto done;
3267 }
3268
f684960e
CW
3269 return -EINVAL;
3270
3271done:
c0c36b94
CW
3272 if (intel_encoder->base.crtc)
3273 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
3274
3275 return 0;
3276}
3277
a4fc5ed6 3278static void
73845adf 3279intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 3280{
1d508706 3281 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 3282
9cd300e0
JN
3283 if (!IS_ERR_OR_NULL(intel_connector->edid))
3284 kfree(intel_connector->edid);
3285
acd8db10
PZ
3286 /* Can't call is_edp() since the encoder may have been destroyed
3287 * already. */
3288 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 3289 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 3290
a4fc5ed6 3291 drm_connector_cleanup(connector);
55f78c43 3292 kfree(connector);
a4fc5ed6
KP
3293}
3294
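/*
 * Encoder teardown: remove the DDC/AUX i2c adapter, clean up the DRM
 * encoder and, for eDP, cancel the delayed VDD-off work and force the
 * panel VDD off synchronously so it is not left enabled.
 */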
00c09d70 3295void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 3296{
da63a9f2
PZ
3297 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3298 struct intel_dp *intel_dp = &intel_dig_port->dp;
bd173813 3299 struct drm_device *dev = intel_dp_to_dev(intel_dp);
24d05927
DV
3300
3301 i2c_del_adapter(&intel_dp->adapter);
3302 drm_encoder_cleanup(encoder);
bd943159
KP
3303 if (is_edp(intel_dp)) {
3304 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
bd173813 3305 mutex_lock(&dev->mode_config.mutex);
bd943159 3306 ironlake_panel_vdd_off_sync(intel_dp);
bd173813 3307 mutex_unlock(&dev->mode_config.mutex);
bd943159 3308 }
da63a9f2 3309 kfree(intel_dig_port);
24d05927
DV
3310}
3311
a4fc5ed6 3312static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 3313 .dpms = intel_connector_dpms,
a4fc5ed6
KP
3314 .detect = intel_dp_detect,
3315 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 3316 .set_property = intel_dp_set_property,
73845adf 3317 .destroy = intel_dp_connector_destroy,
a4fc5ed6
KP
3318};
3319
3320static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
3321 .get_modes = intel_dp_get_modes,
3322 .mode_valid = intel_dp_mode_valid,
df0e9248 3323 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
3324};
3325
a4fc5ed6 3326static const struct drm_encoder_funcs intel_dp_enc_funcs = {
24d05927 3327 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
3328};
3329
995b6762 3330static void
21d40d37 3331intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 3332{
fa90ecef 3333 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
c8110e52 3334
885a5014 3335 intel_dp_check_link_status(intel_dp);
c8110e52 3336}
6207937d 3337
e3421a18
ZW
3338/* Return which DP Port should be selected for Transcoder DP control */
3339int
0206e353 3340intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
3341{
3342 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
3343 struct intel_encoder *intel_encoder;
3344 struct intel_dp *intel_dp;
e3421a18 3345
fa90ecef
PZ
3346 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3347 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 3348
fa90ecef
PZ
3349 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3350 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 3351 return intel_dp->output_reg;
e3421a18 3352 }
ea5b213a 3353
e3421a18
ZW
3354 return -1;
3355}
3356
36e83a18 3357/* check the VBT to see whether the given DP port hosts an eDP panel */
5d8a7752 3358bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
3359{
3360 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 3361 union child_device_config *p_child;
36e83a18 3362 int i;
5d8a7752
VS
3363 static const short port_mapping[] = {
3364 [PORT_B] = PORT_IDPB,
3365 [PORT_C] = PORT_IDPC,
3366 [PORT_D] = PORT_IDPD,
3367 };
36e83a18 3368
3b32a35b
VS
3369 if (port == PORT_A)
3370 return true;
3371
41aa3448 3372 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
3373 return false;
3374
41aa3448
RV
3375 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3376 p_child = dev_priv->vbt.child_dev + i;
36e83a18 3377
5d8a7752 3378 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
3379 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3380 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
3381 return true;
3382 }
3383 return false;
3384}
3385
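/*
 * Attach the connector properties: force_audio and Broadcast RGB for every
 * DP connector, plus a scaling-mode property (defaulting to aspect-ratio
 * fitting) for eDP panels.
 */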
f684960e
CW
3386static void
3387intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3388{
53b41837
YN
3389 struct intel_connector *intel_connector = to_intel_connector(connector);
3390
3f43c48d 3391 intel_attach_force_audio_property(connector);
e953fd7b 3392 intel_attach_broadcast_rgb_property(connector);
55bc60db 3393 intel_dp->color_range_auto = true;
53b41837
YN
3394
3395 if (is_edp(intel_dp)) {
3396 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
3397 drm_object_attach_property(
3398 &connector->base,
53b41837 3399 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
3400 DRM_MODE_SCALE_ASPECT);
3401 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 3402 }
f684960e
CW
3403}
3404
67a54566
DV
3405static void
3406intel_dp_init_panel_power_sequencer(struct drm_device *dev,
f30d26e4
JN
3407 struct intel_dp *intel_dp,
3408 struct edp_power_seq *out)
67a54566
DV
3409{
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3411 struct edp_power_seq cur, vbt, spec, final;
3412 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 3413 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420
JB
3414
3415 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 3416 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
3417 pp_on_reg = PCH_PP_ON_DELAYS;
3418 pp_off_reg = PCH_PP_OFF_DELAYS;
3419 pp_div_reg = PCH_PP_DIVISOR;
3420 } else {
bf13e81b
JN
3421 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3422
3423 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
3424 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3425 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3426 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 3427 }
67a54566
DV
3428
3429 /* Workaround: Need to write PP_CONTROL with the unlock key as
3430 * the very first thing. */
453c5420 3431 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 3432 I915_WRITE(pp_ctrl_reg, pp);
67a54566 3433
453c5420
JB
3434 pp_on = I915_READ(pp_on_reg);
3435 pp_off = I915_READ(pp_off_reg);
3436 pp_div = I915_READ(pp_div_reg);
67a54566
DV
3437
3438 /* Pull timing values out of registers */
3439 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
3440 PANEL_POWER_UP_DELAY_SHIFT;
3441
3442 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
3443 PANEL_LIGHT_ON_DELAY_SHIFT;
3444
3445 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
3446 PANEL_LIGHT_OFF_DELAY_SHIFT;
3447
3448 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
3449 PANEL_POWER_DOWN_DELAY_SHIFT;
3450
3451 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
3452 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
3453
3454 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3455 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
3456
41aa3448 3457 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
3458
3459 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
3460 * our hw here, which are all in 100usec. */
3461 spec.t1_t3 = 210 * 10;
3462 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
3463 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
3464 spec.t10 = 500 * 10;
3465 /* This one is special and actually in units of 100ms, but zero-
3466 * based in the hw (so we need to add 100 ms). But the sw vbt
3467 * table multiplies it by 1000 to make it in units of 100usec,
3468 * too. */
3469 spec.t11_t12 = (510 + 100) * 10;
3470
3471 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3472 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
3473
3474 /* Use the max of the register settings and vbt. If both are
3475 * unset, fall back to the spec limits. */
3476#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
3477 spec.field : \
3478 max(cur.field, vbt.field))
3479 assign_final(t1_t3);
3480 assign_final(t8);
3481 assign_final(t9);
3482 assign_final(t10);
3483 assign_final(t11_t12);
3484#undef assign_final
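	/* For example, assign_final(t1_t3) expands to:
	 *   final.t1_t3 = (max(cur.t1_t3, vbt.t1_t3) == 0 ?
	 *                  spec.t1_t3 : max(cur.t1_t3, vbt.t1_t3));
	 */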
3485
3486#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
3487 intel_dp->panel_power_up_delay = get_delay(t1_t3);
3488 intel_dp->backlight_on_delay = get_delay(t8);
3489 intel_dp->backlight_off_delay = get_delay(t9);
3490 intel_dp->panel_power_down_delay = get_delay(t10);
3491 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
3492#undef get_delay
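	/* The merged values are still in 100 us units, so get_delay()
	 * converts them to milliseconds; e.g. a final t1_t3 of 2100 (the
	 * 210 ms spec limit) yields a panel_power_up_delay of 210. */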
3493
f30d26e4
JN
3494 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
3495 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
3496 intel_dp->panel_power_cycle_delay);
3497
3498 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
3499 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
3500
3501 if (out)
3502 *out = final;
3503}
3504
3505static void
3506intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3507 struct intel_dp *intel_dp,
3508 struct edp_power_seq *seq)
3509{
3510 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
3511 u32 pp_on, pp_off, pp_div, port_sel = 0;
3512 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
3513 int pp_on_reg, pp_off_reg, pp_div_reg;
3514
3515 if (HAS_PCH_SPLIT(dev)) {
3516 pp_on_reg = PCH_PP_ON_DELAYS;
3517 pp_off_reg = PCH_PP_OFF_DELAYS;
3518 pp_div_reg = PCH_PP_DIVISOR;
3519 } else {
bf13e81b
JN
3520 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
3521
3522 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
3523 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
3524 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
3525 }
3526
67a54566 3527 /* And finally store the new values in the power sequencer. */
f30d26e4
JN
3528 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3529 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
3530 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3531 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
3532 /* Compute the divisor for the pp clock, simply match the Bspec
3533 * formula. */
453c5420 3534 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 3535 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
3536 << PANEL_POWER_CYCLE_DELAY_SHIFT);
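	/* t11_t12 is still in 100 us units here, so dividing by 1000 gives
	 * the 100 ms units of the hardware field; e.g. the 6100 spec value
	 * (610 ms) is programmed as DIV_ROUND_UP(6100, 1000) = 7. */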
3537
3538 /* Haswell doesn't have any port selection bits for the panel
3539 * power sequencer any more. */
bc7d38a4 3540 if (IS_VALLEYVIEW(dev)) {
bf13e81b
JN
3541 if (dp_to_dig_port(intel_dp)->port == PORT_B)
3542 port_sel = PANEL_PORT_SELECT_DPB_VLV;
3543 else
3544 port_sel = PANEL_PORT_SELECT_DPC_VLV;
bc7d38a4
ID
3545 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3546 if (dp_to_dig_port(intel_dp)->port == PORT_A)
a24c144c 3547 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 3548 else
a24c144c 3549 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
3550 }
3551
453c5420
JB
3552 pp_on |= port_sel;
3553
3554 I915_WRITE(pp_on_reg, pp_on);
3555 I915_WRITE(pp_off_reg, pp_off);
3556 I915_WRITE(pp_div_reg, pp_div);
67a54566 3557
67a54566 3558 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
3559 I915_READ(pp_on_reg),
3560 I915_READ(pp_off_reg),
3561 I915_READ(pp_div_reg));
f684960e
CW
3562}
3563
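/*
 * eDP-specific connector setup: work out the panel power sequencing delays,
 * probe the DPCD with panel VDD forced on to weed out "ghost" eDP ports,
 * program the power sequencer registers, cache the panel EDID, pick a fixed
 * mode (EDID preferred mode first, VBT mode as fallback) and initialise the
 * panel and backlight. Returns false when no real panel is present so the
 * caller can tear the connector down again.
 */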
ed92f0b2
PZ
3564static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3565 struct intel_connector *intel_connector)
3566{
3567 struct drm_connector *connector = &intel_connector->base;
3568 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3569 struct drm_device *dev = intel_dig_port->base.base.dev;
3570 struct drm_i915_private *dev_priv = dev->dev_private;
3571 struct drm_display_mode *fixed_mode = NULL;
3572 struct edp_power_seq power_seq = { 0 };
3573 bool has_dpcd;
3574 struct drm_display_mode *scan;
3575 struct edid *edid;
3576
3577 if (!is_edp(intel_dp))
3578 return true;
3579
3580 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3581
3582 /* Cache DPCD and EDID for edp. */
3583 ironlake_edp_panel_vdd_on(intel_dp);
3584 has_dpcd = intel_dp_get_dpcd(intel_dp);
3585 ironlake_edp_panel_vdd_off(intel_dp, false);
3586
3587 if (has_dpcd) {
3588 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3589 dev_priv->no_aux_handshake =
3590 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3591 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3592 } else {
3593 /* if this fails, presume the device is a ghost */
3594 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
3595 return false;
3596 }
3597
3598 /* We now know it's not a ghost, init power sequence regs. */
3599 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3600 &power_seq);
3601
ed92f0b2
PZ
3602 edid = drm_get_edid(connector, &intel_dp->adapter);
3603 if (edid) {
3604 if (drm_add_edid_modes(connector, edid)) {
3605 drm_mode_connector_update_edid_property(connector,
3606 edid);
3607 drm_edid_to_eld(connector, edid);
3608 } else {
3609 kfree(edid);
3610 edid = ERR_PTR(-EINVAL);
3611 }
3612 } else {
3613 edid = ERR_PTR(-ENOENT);
3614 }
3615 intel_connector->edid = edid;
3616
3617 /* prefer fixed mode from EDID if available */
3618 list_for_each_entry(scan, &connector->probed_modes, head) {
3619 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3620 fixed_mode = drm_mode_duplicate(dev, scan);
3621 break;
3622 }
3623 }
3624
3625 /* fall back to the VBT panel mode if the EDID provided no preferred mode */
3626 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3627 fixed_mode = drm_mode_duplicate(dev,
3628 dev_priv->vbt.lfp_lvds_vbt_mode);
3629 if (fixed_mode)
3630 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3631 }
3632
ed92f0b2
PZ
3633 intel_panel_init(&intel_connector->panel, fixed_mode);
3634 intel_panel_setup_backlight(connector);
3635
3636 return true;
3637}
3638
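/*
 * Common DP/eDP connector initialisation: register the DRM connector,
 * select the AUX channel control register and HPD pin for the port, set up
 * the DDC/AUX i2c bus and, for eDP, run the panel setup above. On failure
 * everything registered so far is unwound and false is returned.
 */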
16c25533 3639bool
f0fec3f2
PZ
3640intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3641 struct intel_connector *intel_connector)
a4fc5ed6 3642{
f0fec3f2
PZ
3643 struct drm_connector *connector = &intel_connector->base;
3644 struct intel_dp *intel_dp = &intel_dig_port->dp;
3645 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3646 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 3647 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 3648 enum port port = intel_dig_port->port;
5eb08b69 3649 const char *name = NULL;
b2a14755 3650 int type, error;
a4fc5ed6 3651
0767935e
DV
3652 /* Preserve the current hw state. */
3653 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 3654 intel_dp->attached_connector = intel_connector;
3d3dc149 3655
3b32a35b 3656 if (intel_dp_is_edp(dev, port))
b329530c 3657 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
3658 else
3659 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 3660
f7d24902
ID
3661 /*
3662 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3663 * for DP the encoder type can be set by the caller to
3664 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
3665 */
3666 if (type == DRM_MODE_CONNECTOR_eDP)
3667 intel_encoder->type = INTEL_OUTPUT_EDP;
3668
e7281eab
ID
3669 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
3670 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
3671 port_name(port));
3672
b329530c 3673 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
3674 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
3675
a4fc5ed6
KP
3676 connector->interlace_allowed = true;
3677 connector->doublescan_allowed = 0;
3678
f0fec3f2
PZ
3679 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3680 ironlake_panel_vdd_work);
a4fc5ed6 3681
df0e9248 3682 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6
KP
3683 drm_sysfs_connector_add(connector);
3684
affa9354 3685 if (HAS_DDI(dev))
bcbc889b
PZ
3686 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3687 else
3688 intel_connector->get_hw_state = intel_connector_get_hw_state;
3689
9ed35ab1
PZ
3690 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
3691 if (HAS_DDI(dev)) {
3692 switch (intel_dig_port->port) {
3693 case PORT_A:
3694 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3695 break;
3696 case PORT_B:
3697 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3698 break;
3699 case PORT_C:
3700 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3701 break;
3702 case PORT_D:
3703 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3704 break;
3705 default:
3706 BUG();
3707 }
3708 }
e8cb4558 3709
a4fc5ed6 3710 /* Set up the DDC bus. */
ab9d7c30
PZ
3711 switch (port) {
3712 case PORT_A:
1d843f9d 3713 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
3714 name = "DPDDC-A";
3715 break;
3716 case PORT_B:
1d843f9d 3717 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
3718 name = "DPDDC-B";
3719 break;
3720 case PORT_C:
1d843f9d 3721 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
3722 name = "DPDDC-C";
3723 break;
3724 case PORT_D:
1d843f9d 3725 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
3726 name = "DPDDC-D";
3727 break;
3728 default:
ad1c0b19 3729 BUG();
5eb08b69
ZW
3730 }
3731
b2a14755
PZ
3732 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3733 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3734 error, port_name(port));
c1f05264 3735
2b28bb1b
RV
3736 intel_dp->psr_setup_done = false;
3737
b2f246a8 3738 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
15b1d171
PZ
3739 i2c_del_adapter(&intel_dp->adapter);
3740 if (is_edp(intel_dp)) {
3741 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3742 mutex_lock(&dev->mode_config.mutex);
3743 ironlake_panel_vdd_off_sync(intel_dp);
3744 mutex_unlock(&dev->mode_config.mutex);
3745 }
b2f246a8
PZ
3746 drm_sysfs_connector_remove(connector);
3747 drm_connector_cleanup(connector);
16c25533 3748 return false;
b2f246a8 3749 }
32f9d658 3750
f684960e
CW
3751 intel_dp_add_properties(intel_dp, connector);
3752
a4fc5ed6
KP
3753 /* For the G4X desktop chipset, PEG_BAND_GAP_DATA bits 3:0 must first be
3754 * written with 0xd. Failure to do so will result in spurious interrupts being
3755 * generated on the port when a cable is not attached.
3756 */
3757 if (IS_G4X(dev) && !IS_GM45(dev)) {
3758 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3759 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3760 }
16c25533
PZ
3761
3762 return true;
a4fc5ed6 3763}
f0fec3f2
PZ
3764
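/*
 * Top-level entry point for a DP port: allocate the digital port and
 * connector, wire up the encoder hooks (Valleyview gets its own
 * pre_pll_enable/pre_enable/enable callbacks) and hand off to
 * intel_dp_init_connector(); on failure the partially initialised objects
 * are freed again.
 */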
3765void
3766intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3767{
3768 struct intel_digital_port *intel_dig_port;
3769 struct intel_encoder *intel_encoder;
3770 struct drm_encoder *encoder;
3771 struct intel_connector *intel_connector;
3772
b14c5679 3773 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
3774 if (!intel_dig_port)
3775 return;
3776
b14c5679 3777 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
3778 if (!intel_connector) {
3779 kfree(intel_dig_port);
3780 return;
3781 }
3782
3783 intel_encoder = &intel_dig_port->base;
3784 encoder = &intel_encoder->base;
3785
3786 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3787 DRM_MODE_ENCODER_TMDS);
3788
5bfe2ac0 3789 intel_encoder->compute_config = intel_dp_compute_config;
b934223d 3790 intel_encoder->mode_set = intel_dp_mode_set;
00c09d70
PZ
3791 intel_encoder->disable = intel_disable_dp;
3792 intel_encoder->post_disable = intel_post_disable_dp;
3793 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 3794 intel_encoder->get_config = intel_dp_get_config;
ab1f90f9 3795 if (IS_VALLEYVIEW(dev)) {
ecff4f3b 3796 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
3797 intel_encoder->pre_enable = vlv_pre_enable_dp;
3798 intel_encoder->enable = vlv_enable_dp;
3799 } else {
ecff4f3b
JN
3800 intel_encoder->pre_enable = g4x_pre_enable_dp;
3801 intel_encoder->enable = g4x_enable_dp;
ab1f90f9 3802 }
f0fec3f2 3803
174edf1f 3804 intel_dig_port->port = port;
f0fec3f2
PZ
3805 intel_dig_port->dp.output_reg = output_reg;
3806
00c09d70 3807 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
f0fec3f2
PZ
3808 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3809 intel_encoder->cloneable = false;
3810 intel_encoder->hot_plug = intel_dp_hot_plug;
3811
15b1d171
PZ
3812 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3813 drm_encoder_cleanup(encoder);
3814 kfree(intel_dig_port);
b2f246a8 3815 kfree(intel_connector);
15b1d171 3816 }
f0fec3f2 3817}