/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM	2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *, intel_clock_t *);
};

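/*
 * Editor's note (illustrative, not part of the original driver): each
 * intel_limits_* table below bounds the legal divider values for one
 * platform/output combination, and its find_pll callback picks divider
 * values within those bounds whose resulting dot clock best matches the
 * requested mode clock.  The derived fields follow the relations used by
 * pineview_clock()/intel_clock() further down:
 *
 *	vco = refclk * m / n	(the exact n/m adjustment is platform dependent)
 *	dot = vco / (p1 * p2)
 */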
/* FDI */
#define IRONLAKE_FDI_FREQ	2700000 /* in kHz for mode->clock */

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

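/*
 * Editor's note (illustrative): the return value is in units of 100MHz, so
 * the non-GEN5 default of 27 corresponds to a 2.7 GHz FDI link clock; on
 * GEN5 the value is instead derived from the FDI_PLL_BIOS_0 register.
 */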
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t intel_limits_vlv_dac = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_hdmi = {
	.dot = { .min = 20000, .max = 165000 },
	.vco = { .min = 4000000, .max = 5994000},
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_dp = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		return 0;
	}

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		return 0;
	}

	return I915_READ(DPIO_DATA);
}

static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
			     u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		return;
	}

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");
}

static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}

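/*
 * Editor's note (illustrative usage sketch, not from the original file):
 * both DPIO accessors above expect dpio_lock to be held, so a caller would
 * look roughly like the following, where DPIO_SOME_REG and some_bit are
 * hypothetical placeholders for a real register offset and bit:
 *
 *	mutex_lock(&dev_priv->dpio_lock);
 *	tmp = intel_dpio_read(dev_priv, DPIO_SOME_REG);
 *	intel_dpio_write(dev_priv, DPIO_SOME_REG, tmp | some_bit);
 *	mutex_unlock(&dev_priv->dpio_lock);
 */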
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
			limit = &intel_limits_vlv_dac;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
			limit = &intel_limits_vlv_hdmi;
		else
			limit = &intel_limits_vlv_dp;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

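/*
 * Editor's note (worked example, assuming a 96000 kHz reference clock as
 * used by intel_find_pll_g4x_dp() below): with m1 = 14, m2 = 8, n = 2,
 * p1 = 2, p2 = 10 the non-Pineview formulas above give
 *
 *	m   = 5 * (14 + 2) + (8 + 2) = 90
 *	vco = 96000 * 90 / (2 + 2)   = 2160000 kHz
 *	dot = 2160000 / (2 * 10)     = 108000 kHz (108 MHz)
 *
 * which falls inside the intel_limits_i9xx_sdvo ranges listed earlier.
 */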
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

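/*
 * Editor's note (illustrative): err starts at the target itself, so the
 * function returns true only if at least one valid divider combination got
 * strictly closer than that, i.e. only if best_clock was actually written.
 * For example, target = 108000 with the dividers from the worked example
 * above yields dot = 108000, err = 0, and the search reports success.
 */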
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

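/*
 * Editor's note (arithmetic check): err_most = (target >> 8) + (target >> 9)
 * = target * (1/256 + 1/512) = target * 3/512 ≈ target * 0.00586, which is
 * what the "approximately equals target * 0.00585" comment above refers to.
 */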
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
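/*
 * Editor's note (worked example): with the fixed dividers above and the
 * hard-coded 96000 kHz reference,
 *
 *	target < 200000:  m = 5 * (23 + 2) + (8 + 2) = 135,
 *			  dot = 96000 * 135 / (2 + 2) / 20 = 162000 kHz
 *	target >= 200000: m = 5 * (14 + 2) + (2 + 2) = 84,
 *			  dot = 96000 * 84 / (1 + 2) / 10 = 268800 kHz
 *
 * i.e. approximately the two DisplayPort link rates of 162 and 270 MHz
 * mentioned in the comment above.
 */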
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
	u32 m, n, fastclk;
	u32 updrate, minupdate, fracbits, p;
	unsigned long bestppm, ppm, absppm;
	int dotclk, flag;

	flag = 0;
	dotclk = target * 1000;
	bestppm = 1000000;
	ppm = absppm = 0;
	fastclk = dotclk / (2*100);
	updrate = 0;
	minupdate = 19200;
	fracbits = 1;
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
	bestm1 = bestm2 = bestp1 = bestp2 = 0;

	/* based on hardware requirement, prefer smaller n to precision */
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
		updrate = refclk / n;
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
				if (p2 > 10)
					p2 = p2 - 1;
				p = p1 * p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
					m2 = (((2*(fastclk * p * n / m1 )) +
					       refclk) / (2*refclk));
					m = m1 * m2;
					vco = updrate * m;
					if (vco >= limit->vco.min && vco < limit->vco.max) {
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
						absppm = (ppm > 0) ? ppm : (-ppm);
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
							bestppm = 0;
							flag = 1;
						}
						if (absppm < bestppm - 10) {
							bestppm = absppm;
							flag = 1;
						}
						if (flag) {
							bestn = n;
							bestm1 = m1;
							bestm2 = m2;
							bestp1 = p1;
							bestp2 = p2;
							flag = 0;
						}
					}
				}
			}
		}
	}
	best_clock->n = bestn;
	best_clock->m1 = bestm1;
	best_clock->m2 = bestm2;
	best_clock->p1 = bestp1;
	best_clock->p2 = bestp2;

	return true;
}

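/*
 * Editor's note (illustrative): the VLV search above scores each candidate
 * by the parts-per-million deviation of vco/p from fastclk (the target dot
 * clock scaled by the constant factor used when computing fastclk).  A
 * candidate wins either when it is within 100 ppm and uses a larger p1*p2
 * than the current best, or when it improves the best ppm by more than 10.
 */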
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->cpu_transcoder;
}

static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

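/*
 * Editor's note (illustrative): these WARN-based helpers only complain in
 * the kernel log, they do not stop the modeset.  They are used as sanity
 * checks in the enable/disable paths below, e.g. intel_enable_pipe() calls
 * assert_pll_enabled() on pre-ILK parts before switching a pipe on.
 */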
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_pch_pll *pll,
			   struct intel_crtc *crtc,
			   bool state)
{
	u32 val;
	bool cur_state;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (WARN(!pll,
		 "asserting PCH PLL %s with no PLL\n", state_string(state)))
		return;

	val = I915_READ(pll->pll_reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
	     pll->pll_reg, state_string(state), state_string(cur_state), val);

	/* Make sure the selected PLL is correctly attached to the transcoder */
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
		cur_state = pll->pll_reg == _PCH_DPLL_B;
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
			  cur_state, crtc->pipe, pch_dpll)) {
			cur_state = !!(val >> (4*crtc->pipe + 3));
			WARN(cur_state != state,
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
			     pll->pll_reg == _PCH_DPLL_B,
			     state_string(state),
			     crtc->pipe,
			     val);
		}
	}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but is not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
		return;
	}

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 *
 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_DATA,
		   value);
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRWR);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		return;
	}
}

static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRRD);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
		return 0;
	}

	return I915_READ(SBI_DATA);
}

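/*
 * Editor's note (illustrative): like the DPIO helpers earlier, both SBI
 * accessors assume the caller already holds dev_priv->dpio_lock, which is
 * what the WARN_ON(!mutex_is_locked(...)) checks at the top enforce.
 */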
/**
 * ironlake_enable_pch_pll - enable PCH PLL
 * @intel_crtc: CRTC whose PCH PLL should be enabled
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;
	int reg;
	u32 val;

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	pll = intel_crtc->pch_pll;
	if (pll == NULL)
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = true;
}

static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	if (pll == NULL)
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_pch_pll_disabled(dev_priv, pll, NULL);
		return;
	}

	if (--pll->active) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = false;
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv,
			       to_intel_crtc(crtc)->pch_pll,
			       to_intel_crtc(crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(TRANSCONF(TRANSCODER_A), val);
	if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

b8a4f404
PZ
1677static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1678 enum pipe pipe)
040484af 1679{
23670b32
DV
1680 struct drm_device *dev = dev_priv->dev;
1681 uint32_t reg, val;
040484af
JB
1682
1683 /* FDI relies on the transcoder */
1684 assert_fdi_tx_disabled(dev_priv, pipe);
1685 assert_fdi_rx_disabled(dev_priv, pipe);
1686
291906f1
JB
1687 /* Ports must be off as well */
1688 assert_pch_ports_disabled(dev_priv, pipe);
1689
040484af
JB
1690 reg = TRANSCONF(pipe);
1691 val = I915_READ(reg);
1692 val &= ~TRANS_ENABLE;
1693 I915_WRITE(reg, val);
1694 /* wait for PCH transcoder off, transcoder state */
1695 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4c9c18c2 1696 DRM_ERROR("failed to disable transcoder %d\n", pipe);
23670b32
DV
1697
1698 if (!HAS_PCH_IBX(dev)) {
1699 /* Workaround: Clear the timing override chicken bit again. */
1700 reg = TRANS_CHICKEN2(pipe);
1701 val = I915_READ(reg);
1702 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1703 I915_WRITE(reg, val);
1704 }
040484af
JB
1705}
1706
ab4d966c 1707static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
8fb033d7 1708{
8fb033d7
PZ
1709 u32 val;
1710
8a52fd9f 1711 val = I915_READ(_TRANSACONF);
8fb033d7 1712 val &= ~TRANS_ENABLE;
8a52fd9f 1713 I915_WRITE(_TRANSACONF, val);
8fb033d7 1714 /* wait for PCH transcoder off, transcoder state */
8a52fd9f
PZ
1715 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1716 DRM_ERROR("Failed to disable PCH transcoder\n");
223a6fdf
PZ
1717
1718 /* Workaround: clear timing override bit. */
1719 val = I915_READ(_TRANSA_CHICKEN2);
23670b32 1720 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
223a6fdf 1721 I915_WRITE(_TRANSA_CHICKEN2, val);
040484af
JB
1722}
1723
b24e7179 1724/**
309cfea8 1725 * intel_enable_pipe - enable a pipe, asserting requirements
b24e7179
JB
1726 * @dev_priv: i915 private structure
1727 * @pipe: pipe to enable
040484af 1728 * @pch_port: on ILK+, is this pipe driving a PCH port or not
b24e7179
JB
1729 *
1730 * Enable @pipe, making sure that various hardware specific requirements
1731 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1732 *
1733 * @pipe should be %PIPE_A or %PIPE_B.
1734 *
1735 * Will wait until the pipe is actually running (i.e. first vblank) before
1736 * returning.
1737 */
040484af
JB
1738static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1739 bool pch_port)
b24e7179 1740{
702e7a56
PZ
1741 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1742 pipe);
1a240d4d 1743 enum pipe pch_transcoder;
b24e7179
JB
1744 int reg;
1745 u32 val;
1746
cc391bbb
PZ
1747 if (IS_HASWELL(dev_priv->dev))
1748 pch_transcoder = TRANSCODER_A;
1749 else
1750 pch_transcoder = pipe;
1751
b24e7179
JB
1752 /*
1753 * A pipe without a PLL won't actually be able to drive bits from
1754 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1755 * need the check.
1756 */
1757 if (!HAS_PCH_SPLIT(dev_priv->dev))
1758 assert_pll_enabled(dev_priv, pipe);
040484af
JB
1759 else {
1760 if (pch_port) {
1761 /* if driving the PCH, we need FDI enabled */
cc391bbb 1762 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1a240d4d
DV
1763 assert_fdi_tx_pll_enabled(dev_priv,
1764 (enum pipe) cpu_transcoder);
040484af
JB
1765 }
1766 /* FIXME: assert CPU port conditions for SNB+ */
1767 }
b24e7179 1768
702e7a56 1769 reg = PIPECONF(cpu_transcoder);
b24e7179 1770 val = I915_READ(reg);
00d70b15
CW
1771 if (val & PIPECONF_ENABLE)
1772 return;
1773
1774 I915_WRITE(reg, val | PIPECONF_ENABLE);
b24e7179
JB
1775 intel_wait_for_vblank(dev_priv->dev, pipe);
1776}
1777
1778/**
309cfea8 1779 * intel_disable_pipe - disable a pipe, asserting requirements
b24e7179
JB
1780 * @dev_priv: i915 private structure
1781 * @pipe: pipe to disable
1782 *
1783 * Disable @pipe, making sure that various hardware specific requirements
1784 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1785 *
1786 * @pipe should be %PIPE_A or %PIPE_B.
1787 *
1788 * Will wait until the pipe has shut down before returning.
1789 */
1790static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1791 enum pipe pipe)
1792{
702e7a56
PZ
1793 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1794 pipe);
b24e7179
JB
1795 int reg;
1796 u32 val;
1797
1798 /*
1799 * Make sure planes won't keep trying to pump pixels to us,
1800 * or we might hang the display.
1801 */
1802 assert_planes_disabled(dev_priv, pipe);
1803
1804 /* Don't disable pipe A or the pipe A PLL if the pipe A force quirk requires them */
1805 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1806 return;
1807
702e7a56 1808 reg = PIPECONF(cpu_transcoder);
b24e7179 1809 val = I915_READ(reg);
00d70b15
CW
1810 if ((val & PIPECONF_ENABLE) == 0)
1811 return;
1812
1813 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
b24e7179
JB
1814 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1815}
1816
d74362c9
KP
1817/*
1818 * Plane regs are double buffered, going from enabled->disabled needs a
1819 * trigger in order to latch. The display address reg provides this.
1820 */
6f1d69b0 1821void intel_flush_display_plane(struct drm_i915_private *dev_priv,
d74362c9
KP
1822 enum plane plane)
1823{
14f86147
DL
1824 if (dev_priv->info->gen >= 4)
1825 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1826 else
1827 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
d74362c9
KP
1828}
1829
b24e7179
JB
1830/**
1831 * intel_enable_plane - enable a display plane on a given pipe
1832 * @dev_priv: i915 private structure
1833 * @plane: plane to enable
1834 * @pipe: pipe being fed
1835 *
1836 * Enable @plane on @pipe, making sure that @pipe is running first.
1837 */
1838static void intel_enable_plane(struct drm_i915_private *dev_priv,
1839 enum plane plane, enum pipe pipe)
1840{
1841 int reg;
1842 u32 val;
1843
1844 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1845 assert_pipe_enabled(dev_priv, pipe);
1846
1847 reg = DSPCNTR(plane);
1848 val = I915_READ(reg);
00d70b15
CW
1849 if (val & DISPLAY_PLANE_ENABLE)
1850 return;
1851
1852 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
d74362c9 1853 intel_flush_display_plane(dev_priv, plane);
b24e7179
JB
1854 intel_wait_for_vblank(dev_priv->dev, pipe);
1855}
1856
b24e7179
JB
1857/**
1858 * intel_disable_plane - disable a display plane
1859 * @dev_priv: i915 private structure
1860 * @plane: plane to disable
1861 * @pipe: pipe consuming the data
1862 *
1863 * Disable @plane; should be an independent operation.
1864 */
1865static void intel_disable_plane(struct drm_i915_private *dev_priv,
1866 enum plane plane, enum pipe pipe)
1867{
1868 int reg;
1869 u32 val;
1870
1871 reg = DSPCNTR(plane);
1872 val = I915_READ(reg);
00d70b15
CW
1873 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1874 return;
1875
1876 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
b24e7179
JB
1877 intel_flush_display_plane(dev_priv, plane);
1878 intel_wait_for_vblank(dev_priv->dev, pipe);
1879}
1880
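/*
 * A note on the shape shared by intel_enable_pipe(), intel_disable_pipe(),
 * intel_enable_plane() and intel_disable_plane() above: each one reads the
 * control register, returns early if the enable bit already has the requested
 * value, writes the new value, and then waits for the hardware to settle.
 * The sketch below models only that idempotent read-modify-write pattern;
 * fake_reg, ENABLE_BIT and set_enabled() are invented for illustration and a
 * plain variable stands in for the MMIO register.
 */
#include <stdio.h>

#define ENABLE_BIT (1u << 31)

static unsigned int fake_reg;	/* stand-in for the MMIO control register */

static void set_enabled(int enable)
{
	unsigned int val = fake_reg;

	/* Early return when the bit already has the requested state. */
	if (enable && (val & ENABLE_BIT))
		return;
	if (!enable && !(val & ENABLE_BIT))
		return;

	fake_reg = enable ? (val | ENABLE_BIT) : (val & ~ENABLE_BIT);
	printf("reg now %#x (the driver would wait for vblank/pipe-off here)\n",
	       fake_reg);
}

int main(void)
{
	set_enabled(1);
	set_enabled(1);	/* no-op: already enabled */
	set_enabled(0);
	return 0;
}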
127bd2ac 1881int
48b956c5 1882intel_pin_and_fence_fb_obj(struct drm_device *dev,
05394f39 1883 struct drm_i915_gem_object *obj,
919926ae 1884 struct intel_ring_buffer *pipelined)
6b95a207 1885{
ce453d81 1886 struct drm_i915_private *dev_priv = dev->dev_private;
6b95a207
KH
1887 u32 alignment;
1888 int ret;
1889
05394f39 1890 switch (obj->tiling_mode) {
6b95a207 1891 case I915_TILING_NONE:
534843da
CW
1892 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1893 alignment = 128 * 1024;
a6c45cf0 1894 else if (INTEL_INFO(dev)->gen >= 4)
534843da
CW
1895 alignment = 4 * 1024;
1896 else
1897 alignment = 64 * 1024;
6b95a207
KH
1898 break;
1899 case I915_TILING_X:
1900 /* pin() will align the object as required by fence */
1901 alignment = 0;
1902 break;
1903 case I915_TILING_Y:
1904 /* FIXME: Is this true? */
1905 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1906 return -EINVAL;
1907 default:
1908 BUG();
1909 }
1910
ce453d81 1911 dev_priv->mm.interruptible = false;
2da3b9b9 1912 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
48b956c5 1913 if (ret)
ce453d81 1914 goto err_interruptible;
6b95a207
KH
1915
1916 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1917 * fence, whereas 965+ only requires a fence if using
1918 * framebuffer compression. For simplicity, we always install
1919 * a fence as the cost is not that onerous.
1920 */
06d98131 1921 ret = i915_gem_object_get_fence(obj);
9a5a53b3
CW
1922 if (ret)
1923 goto err_unpin;
1690e1eb 1924
9a5a53b3 1925 i915_gem_object_pin_fence(obj);
6b95a207 1926
ce453d81 1927 dev_priv->mm.interruptible = true;
6b95a207 1928 return 0;
48b956c5
CW
1929
1930err_unpin:
1931 i915_gem_object_unpin(obj);
ce453d81
CW
1932err_interruptible:
1933 dev_priv->mm.interruptible = true;
48b956c5 1934 return ret;
6b95a207
KH
1935}
1936
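/*
 * The switch in intel_pin_and_fence_fb_obj() above picks the scan-out
 * alignment from the tiling mode and the hardware generation. The standalone
 * sketch below restates that selection; scanout_alignment() and its
 * parameters (gen, is_broadwater_or_crestline) are invented stand-ins for
 * the driver's device-info checks, not real driver API.
 */
#include <stdio.h>

static long scanout_alignment(int tiling_x, int tiling_y,
			      int gen, int is_broadwater_or_crestline)
{
	if (tiling_y)
		return -1;		/* Y tiling is rejected for scan-out */
	if (tiling_x)
		return 0;		/* pin() aligns as the fence requires */
	if (is_broadwater_or_crestline)
		return 128 * 1024;	/* 965G/GM linear scan-out */
	if (gen >= 4)
		return 4 * 1024;
	return 64 * 1024;		/* pre-gen4 linear scan-out */
}

int main(void)
{
	printf("gen3 linear:  %ld\n", scanout_alignment(0, 0, 3, 0));
	printf("gen4 linear:  %ld\n", scanout_alignment(0, 0, 4, 0));
	printf("965GM linear: %ld\n", scanout_alignment(0, 0, 4, 1));
	printf("X-tiled:      %ld\n", scanout_alignment(1, 0, 4, 0));
	return 0;
}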
1690e1eb
CW
1937void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1938{
1939 i915_gem_object_unpin_fence(obj);
1940 i915_gem_object_unpin(obj);
1941}
1942
c2c75131
DV
1943 /* Computes the linear offset to the base tile and adjusts x, y. The number of
1944 * bytes per pixel is assumed to be a power of two. */
5a35e99e
DL
1945unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
1946 unsigned int bpp,
1947 unsigned int pitch)
c2c75131
DV
1948{
1949 int tile_rows, tiles;
1950
1951 tile_rows = *y / 8;
1952 *y %= 8;
1953 tiles = *x / (512/bpp);
1954 *x %= 512/bpp;
1955
1956 return tile_rows * pitch * 8 + tiles * 4096;
1957}
1958
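/*
 * A worked example for the tile arithmetic above: an X tile is 512 bytes
 * wide and 8 rows tall (4096 bytes), so the function returns the byte offset
 * of the containing tile and reduces x/y to coordinates inside it. The
 * program below repeats the same computation for one hypothetical 32bpp
 * surface; xtiled_offset() and the sample numbers are illustrative only.
 */
#include <stdio.h>

static unsigned long xtiled_offset(int *x, int *y,
				   unsigned int cpp, unsigned int pitch)
{
	int tile_rows = *y / 8;			/* whole tile rows above */
	int tiles = *x / (512 / cpp);		/* whole tiles to the left */

	*y %= 8;
	*x %= 512 / cpp;

	return (unsigned long)tile_rows * pitch * 8 +
	       (unsigned long)tiles * 4096;
}

int main(void)
{
	int x = 100, y = 50;			/* pixel position */
	unsigned long off = xtiled_offset(&x, &y, 4, 8192);

	/* 50/8 = 6 tile rows, 100/128 = 0 tiles across, so the offset is
	 * 6 * 8192 * 8 = 393216 and (x, y) becomes (100, 2). */
	printf("offset=%lu x=%d y=%d\n", off, x, y);
	return 0;
}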
17638cd6
JB
1959static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1960 int x, int y)
81255565
JB
1961{
1962 struct drm_device *dev = crtc->dev;
1963 struct drm_i915_private *dev_priv = dev->dev_private;
1964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1965 struct intel_framebuffer *intel_fb;
05394f39 1966 struct drm_i915_gem_object *obj;
81255565 1967 int plane = intel_crtc->plane;
e506a0c6 1968 unsigned long linear_offset;
81255565 1969 u32 dspcntr;
5eddb70b 1970 u32 reg;
81255565
JB
1971
1972 switch (plane) {
1973 case 0:
1974 case 1:
1975 break;
1976 default:
1977 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1978 return -EINVAL;
1979 }
1980
1981 intel_fb = to_intel_framebuffer(fb);
1982 obj = intel_fb->obj;
81255565 1983
5eddb70b
CW
1984 reg = DSPCNTR(plane);
1985 dspcntr = I915_READ(reg);
81255565
JB
1986 /* Mask out pixel format bits in case we change it */
1987 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
57779d06
VS
1988 switch (fb->pixel_format) {
1989 case DRM_FORMAT_C8:
81255565
JB
1990 dspcntr |= DISPPLANE_8BPP;
1991 break;
57779d06
VS
1992 case DRM_FORMAT_XRGB1555:
1993 case DRM_FORMAT_ARGB1555:
1994 dspcntr |= DISPPLANE_BGRX555;
81255565 1995 break;
57779d06
VS
1996 case DRM_FORMAT_RGB565:
1997 dspcntr |= DISPPLANE_BGRX565;
1998 break;
1999 case DRM_FORMAT_XRGB8888:
2000 case DRM_FORMAT_ARGB8888:
2001 dspcntr |= DISPPLANE_BGRX888;
2002 break;
2003 case DRM_FORMAT_XBGR8888:
2004 case DRM_FORMAT_ABGR8888:
2005 dspcntr |= DISPPLANE_RGBX888;
2006 break;
2007 case DRM_FORMAT_XRGB2101010:
2008 case DRM_FORMAT_ARGB2101010:
2009 dspcntr |= DISPPLANE_BGRX101010;
2010 break;
2011 case DRM_FORMAT_XBGR2101010:
2012 case DRM_FORMAT_ABGR2101010:
2013 dspcntr |= DISPPLANE_RGBX101010;
81255565
JB
2014 break;
2015 default:
57779d06 2016 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
81255565
JB
2017 return -EINVAL;
2018 }
57779d06 2019
a6c45cf0 2020 if (INTEL_INFO(dev)->gen >= 4) {
05394f39 2021 if (obj->tiling_mode != I915_TILING_NONE)
81255565
JB
2022 dspcntr |= DISPPLANE_TILED;
2023 else
2024 dspcntr &= ~DISPPLANE_TILED;
2025 }
2026
5eddb70b 2027 I915_WRITE(reg, dspcntr);
81255565 2028
e506a0c6 2029 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
81255565 2030
c2c75131
DV
2031 if (INTEL_INFO(dev)->gen >= 4) {
2032 intel_crtc->dspaddr_offset =
5a35e99e
DL
2033 intel_gen4_compute_offset_xtiled(&x, &y,
2034 fb->bits_per_pixel / 8,
2035 fb->pitches[0]);
c2c75131
DV
2036 linear_offset -= intel_crtc->dspaddr_offset;
2037 } else {
e506a0c6 2038 intel_crtc->dspaddr_offset = linear_offset;
c2c75131 2039 }
e506a0c6
DV
2040
2041 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2042 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
01f2c773 2043 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
a6c45cf0 2044 if (INTEL_INFO(dev)->gen >= 4) {
c2c75131
DV
2045 I915_MODIFY_DISPBASE(DSPSURF(plane),
2046 obj->gtt_offset + intel_crtc->dspaddr_offset);
5eddb70b 2047 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
e506a0c6 2048 I915_WRITE(DSPLINOFF(plane), linear_offset);
5eddb70b 2049 } else
e506a0c6 2050 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
5eddb70b 2051 POSTING_READ(reg);
81255565 2052
17638cd6
JB
2053 return 0;
2054}
2055
2056static int ironlake_update_plane(struct drm_crtc *crtc,
2057 struct drm_framebuffer *fb, int x, int y)
2058{
2059 struct drm_device *dev = crtc->dev;
2060 struct drm_i915_private *dev_priv = dev->dev_private;
2061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2062 struct intel_framebuffer *intel_fb;
2063 struct drm_i915_gem_object *obj;
2064 int plane = intel_crtc->plane;
e506a0c6 2065 unsigned long linear_offset;
17638cd6
JB
2066 u32 dspcntr;
2067 u32 reg;
2068
2069 switch (plane) {
2070 case 0:
2071 case 1:
27f8227b 2072 case 2:
17638cd6
JB
2073 break;
2074 default:
2075 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2076 return -EINVAL;
2077 }
2078
2079 intel_fb = to_intel_framebuffer(fb);
2080 obj = intel_fb->obj;
2081
2082 reg = DSPCNTR(plane);
2083 dspcntr = I915_READ(reg);
2084 /* Mask out pixel format bits in case we change it */
2085 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
57779d06
VS
2086 switch (fb->pixel_format) {
2087 case DRM_FORMAT_C8:
17638cd6
JB
2088 dspcntr |= DISPPLANE_8BPP;
2089 break;
57779d06
VS
2090 case DRM_FORMAT_RGB565:
2091 dspcntr |= DISPPLANE_BGRX565;
17638cd6 2092 break;
57779d06
VS
2093 case DRM_FORMAT_XRGB8888:
2094 case DRM_FORMAT_ARGB8888:
2095 dspcntr |= DISPPLANE_BGRX888;
2096 break;
2097 case DRM_FORMAT_XBGR8888:
2098 case DRM_FORMAT_ABGR8888:
2099 dspcntr |= DISPPLANE_RGBX888;
2100 break;
2101 case DRM_FORMAT_XRGB2101010:
2102 case DRM_FORMAT_ARGB2101010:
2103 dspcntr |= DISPPLANE_BGRX101010;
2104 break;
2105 case DRM_FORMAT_XBGR2101010:
2106 case DRM_FORMAT_ABGR2101010:
2107 dspcntr |= DISPPLANE_RGBX101010;
17638cd6
JB
2108 break;
2109 default:
57779d06 2110 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
17638cd6
JB
2111 return -EINVAL;
2112 }
2113
2114 if (obj->tiling_mode != I915_TILING_NONE)
2115 dspcntr |= DISPPLANE_TILED;
2116 else
2117 dspcntr &= ~DISPPLANE_TILED;
2118
2119 /* Trickle feed must be disabled on ILK+ display planes */
2120 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2121
2122 I915_WRITE(reg, dspcntr);
2123
e506a0c6 2124 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
c2c75131 2125 intel_crtc->dspaddr_offset =
5a35e99e
DL
2126 intel_gen4_compute_offset_xtiled(&x, &y,
2127 fb->bits_per_pixel / 8,
2128 fb->pitches[0]);
c2c75131 2129 linear_offset -= intel_crtc->dspaddr_offset;
17638cd6 2130
e506a0c6
DV
2131 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2132 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
01f2c773 2133 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
c2c75131
DV
2134 I915_MODIFY_DISPBASE(DSPSURF(plane),
2135 obj->gtt_offset + intel_crtc->dspaddr_offset);
bc1c91eb
DL
2136 if (IS_HASWELL(dev)) {
2137 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2138 } else {
2139 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2140 I915_WRITE(DSPLINOFF(plane), linear_offset);
2141 }
17638cd6
JB
2142 POSTING_READ(reg);
2143
2144 return 0;
2145}
2146
2147/* Assume fb object is pinned & idle & fenced and just update base pointers */
2148static int
2149intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2150 int x, int y, enum mode_set_atomic state)
2151{
2152 struct drm_device *dev = crtc->dev;
2153 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 2154
6b8e6ed0
CW
2155 if (dev_priv->display.disable_fbc)
2156 dev_priv->display.disable_fbc(dev);
3dec0095 2157 intel_increase_pllclock(crtc);
81255565 2158
6b8e6ed0 2159 return dev_priv->display.update_plane(crtc, fb, x, y);
81255565
JB
2160}
2161
14667a4b
CW
2162static int
2163intel_finish_fb(struct drm_framebuffer *old_fb)
2164{
2165 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2166 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2167 bool was_interruptible = dev_priv->mm.interruptible;
2168 int ret;
2169
2170 wait_event(dev_priv->pending_flip_queue,
2171 atomic_read(&dev_priv->mm.wedged) ||
2172 atomic_read(&obj->pending_flip) == 0);
2173
2174 /* Big Hammer, we also need to ensure that any pending
2175 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2176 * current scanout is retired before unpinning the old
2177 * framebuffer.
2178 *
2179 * This should only fail upon a hung GPU, in which case we
2180 * can safely continue.
2181 */
2182 dev_priv->mm.interruptible = false;
2183 ret = i915_gem_object_finish_gpu(obj);
2184 dev_priv->mm.interruptible = was_interruptible;
2185
2186 return ret;
2187}
2188
198598d0
VS
2189static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2190{
2191 struct drm_device *dev = crtc->dev;
2192 struct drm_i915_master_private *master_priv;
2193 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2194
2195 if (!dev->primary->master)
2196 return;
2197
2198 master_priv = dev->primary->master->driver_priv;
2199 if (!master_priv->sarea_priv)
2200 return;
2201
2202 switch (intel_crtc->pipe) {
2203 case 0:
2204 master_priv->sarea_priv->pipeA_x = x;
2205 master_priv->sarea_priv->pipeA_y = y;
2206 break;
2207 case 1:
2208 master_priv->sarea_priv->pipeB_x = x;
2209 master_priv->sarea_priv->pipeB_y = y;
2210 break;
2211 default:
2212 break;
2213 }
2214}
2215
5c3b82e2 2216static int
3c4fdcfb 2217intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
94352cf9 2218 struct drm_framebuffer *fb)
79e53945
JB
2219{
2220 struct drm_device *dev = crtc->dev;
6b8e6ed0 2221 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 2222 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
94352cf9 2223 struct drm_framebuffer *old_fb;
5c3b82e2 2224 int ret;
79e53945
JB
2225
2226 /* no fb bound */
94352cf9 2227 if (!fb) {
a5071c2f 2228 DRM_ERROR("No FB bound\n");
5c3b82e2
CW
2229 return 0;
2230 }
2231
5826eca5
ED
2232 if (intel_crtc->plane >= dev_priv->num_pipe) {
2233 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2234 intel_crtc->plane,
2235 dev_priv->num_pipe);
5c3b82e2 2236 return -EINVAL;
79e53945
JB
2237 }
2238
5c3b82e2 2239 mutex_lock(&dev->struct_mutex);
265db958 2240 ret = intel_pin_and_fence_fb_obj(dev,
94352cf9 2241 to_intel_framebuffer(fb)->obj,
919926ae 2242 NULL);
5c3b82e2
CW
2243 if (ret != 0) {
2244 mutex_unlock(&dev->struct_mutex);
a5071c2f 2245 DRM_ERROR("pin & fence failed\n");
5c3b82e2
CW
2246 return ret;
2247 }
79e53945 2248
94352cf9
DV
2249 if (crtc->fb)
2250 intel_finish_fb(crtc->fb);
265db958 2251
94352cf9 2252 ret = dev_priv->display.update_plane(crtc, fb, x, y);
4e6cfefc 2253 if (ret) {
94352cf9 2254 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
5c3b82e2 2255 mutex_unlock(&dev->struct_mutex);
a5071c2f 2256 DRM_ERROR("failed to update base address\n");
4e6cfefc 2257 return ret;
79e53945 2258 }
3c4fdcfb 2259
94352cf9
DV
2260 old_fb = crtc->fb;
2261 crtc->fb = fb;
6c4c86f5
DV
2262 crtc->x = x;
2263 crtc->y = y;
94352cf9 2264
b7f1de28
CW
2265 if (old_fb) {
2266 intel_wait_for_vblank(dev, intel_crtc->pipe);
1690e1eb 2267 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
b7f1de28 2268 }
652c393a 2269
6b8e6ed0 2270 intel_update_fbc(dev);
5c3b82e2 2271 mutex_unlock(&dev->struct_mutex);
79e53945 2272
198598d0 2273 intel_crtc_update_sarea_pos(crtc, x, y);
5c3b82e2
CW
2274
2275 return 0;
79e53945
JB
2276}
2277
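/*
 * intel_pipe_set_base() above relies on a strict ordering: pin and fence the
 * new framebuffer, wait for the GPU to finish with the old one, write the new
 * plane state, wait a vblank so the flip latches, and only then unpin the old
 * buffer (or unpin the new one if the plane update fails). The stub program
 * below demonstrates just that ordering and error path; every function in it
 * (pin_and_fence, finish_gpu, update_plane, wait_vblank, unpin) is an
 * invented placeholder, not a driver call.
 */
#include <stdio.h>

static int pin_and_fence(const char *fb) { printf("pin %s\n", fb); return 0; }
static void finish_gpu(const char *fb)   { printf("finish gpu on %s\n", fb); }
static int update_plane(const char *fb)  { printf("flip to %s\n", fb); return 0; }
static void wait_vblank(void)            { printf("wait for vblank\n"); }
static void unpin(const char *fb)        { printf("unpin %s\n", fb); }

static int set_base(const char **current, const char *new_fb)
{
	const char *old_fb = *current;

	if (pin_and_fence(new_fb))
		return -1;
	if (old_fb)
		finish_gpu(old_fb);	/* old scan-out must be idle first */
	if (update_plane(new_fb)) {
		unpin(new_fb);		/* error path releases the new buffer */
		return -1;
	}
	*current = new_fb;
	if (old_fb) {
		wait_vblank();		/* flip must latch before unpinning */
		unpin(old_fb);
	}
	return 0;
}

int main(void)
{
	const char *current = NULL;

	set_base(&current, "fbA");
	set_base(&current, "fbB");
	return 0;
}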
5e84e1a4
ZW
2278static void intel_fdi_normal_train(struct drm_crtc *crtc)
2279{
2280 struct drm_device *dev = crtc->dev;
2281 struct drm_i915_private *dev_priv = dev->dev_private;
2282 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2283 int pipe = intel_crtc->pipe;
2284 u32 reg, temp;
2285
2286 /* enable normal train */
2287 reg = FDI_TX_CTL(pipe);
2288 temp = I915_READ(reg);
61e499bf 2289 if (IS_IVYBRIDGE(dev)) {
357555c0
JB
2290 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2291 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
2292 } else {
2293 temp &= ~FDI_LINK_TRAIN_NONE;
2294 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 2295 }
5e84e1a4
ZW
2296 I915_WRITE(reg, temp);
2297
2298 reg = FDI_RX_CTL(pipe);
2299 temp = I915_READ(reg);
2300 if (HAS_PCH_CPT(dev)) {
2301 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2302 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2303 } else {
2304 temp &= ~FDI_LINK_TRAIN_NONE;
2305 temp |= FDI_LINK_TRAIN_NONE;
2306 }
2307 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2308
2309 /* wait one idle pattern time */
2310 POSTING_READ(reg);
2311 udelay(1000);
357555c0
JB
2312
2313 /* IVB wants error correction enabled */
2314 if (IS_IVYBRIDGE(dev))
2315 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2316 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
2317}
2318
291427f5
JB
2319static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2320{
2321 struct drm_i915_private *dev_priv = dev->dev_private;
2322 u32 flags = I915_READ(SOUTH_CHICKEN1);
2323
2324 flags |= FDI_PHASE_SYNC_OVR(pipe);
2325 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2326 flags |= FDI_PHASE_SYNC_EN(pipe);
2327 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2328 POSTING_READ(SOUTH_CHICKEN1);
2329}
2330
01a415fd
DV
2331static void ivb_modeset_global_resources(struct drm_device *dev)
2332{
2333 struct drm_i915_private *dev_priv = dev->dev_private;
2334 struct intel_crtc *pipe_B_crtc =
2335 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2336 struct intel_crtc *pipe_C_crtc =
2337 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2338 uint32_t temp;
2339
2340 /* When everything is off, disable FDI C so that we can enable FDI B
2341 * with all lanes. XXX: This misses the case where a pipe is not using
2342 * any PCH resources and so doesn't need any FDI lanes. */
2343 if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2344 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2345 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2346
2347 temp = I915_READ(SOUTH_CHICKEN1);
2348 temp &= ~FDI_BC_BIFURCATION_SELECT;
2349 DRM_DEBUG_KMS("disabling fdi C rx\n");
2350 I915_WRITE(SOUTH_CHICKEN1, temp);
2351 }
2352}
2353
8db9d77b
ZW
2354/* The FDI link training functions for ILK/Ibexpeak. */
2355static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2356{
2357 struct drm_device *dev = crtc->dev;
2358 struct drm_i915_private *dev_priv = dev->dev_private;
2359 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2360 int pipe = intel_crtc->pipe;
0fc932b8 2361 int plane = intel_crtc->plane;
5eddb70b 2362 u32 reg, temp, tries;
8db9d77b 2363
0fc932b8
JB
2364 /* FDI needs bits from pipe & plane first */
2365 assert_pipe_enabled(dev_priv, pipe);
2366 assert_plane_enabled(dev_priv, plane);
2367
e1a44743
AJ
2368 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2369 for train result */
5eddb70b
CW
2370 reg = FDI_RX_IMR(pipe);
2371 temp = I915_READ(reg);
e1a44743
AJ
2372 temp &= ~FDI_RX_SYMBOL_LOCK;
2373 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2374 I915_WRITE(reg, temp);
2375 I915_READ(reg);
e1a44743
AJ
2376 udelay(150);
2377
8db9d77b 2378 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2379 reg = FDI_TX_CTL(pipe);
2380 temp = I915_READ(reg);
77ffb597
AJ
2381 temp &= ~(7 << 19);
2382 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2383 temp &= ~FDI_LINK_TRAIN_NONE;
2384 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 2385 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2386
5eddb70b
CW
2387 reg = FDI_RX_CTL(pipe);
2388 temp = I915_READ(reg);
8db9d77b
ZW
2389 temp &= ~FDI_LINK_TRAIN_NONE;
2390 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
2391 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2392
2393 POSTING_READ(reg);
8db9d77b
ZW
2394 udelay(150);
2395
5b2adf89 2396 /* Ironlake workaround, enable clock pointer after FDI enable */
8f5718a6
DV
2397 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2398 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2399 FDI_RX_PHASE_SYNC_POINTER_EN);
5b2adf89 2400
5eddb70b 2401 reg = FDI_RX_IIR(pipe);
e1a44743 2402 for (tries = 0; tries < 5; tries++) {
5eddb70b 2403 temp = I915_READ(reg);
8db9d77b
ZW
2404 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2405
2406 if ((temp & FDI_RX_BIT_LOCK)) {
2407 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 2408 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
2409 break;
2410 }
8db9d77b 2411 }
e1a44743 2412 if (tries == 5)
5eddb70b 2413 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2414
2415 /* Train 2 */
5eddb70b
CW
2416 reg = FDI_TX_CTL(pipe);
2417 temp = I915_READ(reg);
8db9d77b
ZW
2418 temp &= ~FDI_LINK_TRAIN_NONE;
2419 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2420 I915_WRITE(reg, temp);
8db9d77b 2421
5eddb70b
CW
2422 reg = FDI_RX_CTL(pipe);
2423 temp = I915_READ(reg);
8db9d77b
ZW
2424 temp &= ~FDI_LINK_TRAIN_NONE;
2425 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2426 I915_WRITE(reg, temp);
8db9d77b 2427
5eddb70b
CW
2428 POSTING_READ(reg);
2429 udelay(150);
8db9d77b 2430
5eddb70b 2431 reg = FDI_RX_IIR(pipe);
e1a44743 2432 for (tries = 0; tries < 5; tries++) {
5eddb70b 2433 temp = I915_READ(reg);
8db9d77b
ZW
2434 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2435
2436 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 2437 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
2438 DRM_DEBUG_KMS("FDI train 2 done.\n");
2439 break;
2440 }
8db9d77b 2441 }
e1a44743 2442 if (tries == 5)
5eddb70b 2443 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2444
2445 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 2446
8db9d77b
ZW
2447}
2448
0206e353 2449static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
2450 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2451 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2452 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2453 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2454};
2455
2456/* The FDI link training functions for SNB/Cougarpoint. */
2457static void gen6_fdi_link_train(struct drm_crtc *crtc)
2458{
2459 struct drm_device *dev = crtc->dev;
2460 struct drm_i915_private *dev_priv = dev->dev_private;
2461 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2462 int pipe = intel_crtc->pipe;
fa37d39e 2463 u32 reg, temp, i, retry;
8db9d77b 2464
e1a44743
AJ
2465 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2466 for train result */
5eddb70b
CW
2467 reg = FDI_RX_IMR(pipe);
2468 temp = I915_READ(reg);
e1a44743
AJ
2469 temp &= ~FDI_RX_SYMBOL_LOCK;
2470 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2471 I915_WRITE(reg, temp);
2472
2473 POSTING_READ(reg);
e1a44743
AJ
2474 udelay(150);
2475
8db9d77b 2476 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2477 reg = FDI_TX_CTL(pipe);
2478 temp = I915_READ(reg);
77ffb597
AJ
2479 temp &= ~(7 << 19);
2480 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2481 temp &= ~FDI_LINK_TRAIN_NONE;
2482 temp |= FDI_LINK_TRAIN_PATTERN_1;
2483 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2484 /* SNB-B */
2485 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 2486 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2487
d74cf324
DV
2488 I915_WRITE(FDI_RX_MISC(pipe),
2489 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2490
5eddb70b
CW
2491 reg = FDI_RX_CTL(pipe);
2492 temp = I915_READ(reg);
8db9d77b
ZW
2493 if (HAS_PCH_CPT(dev)) {
2494 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2495 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2496 } else {
2497 temp &= ~FDI_LINK_TRAIN_NONE;
2498 temp |= FDI_LINK_TRAIN_PATTERN_1;
2499 }
5eddb70b
CW
2500 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2501
2502 POSTING_READ(reg);
8db9d77b
ZW
2503 udelay(150);
2504
8f5718a6 2505 cpt_phase_pointer_enable(dev, pipe);
291427f5 2506
0206e353 2507 for (i = 0; i < 4; i++) {
5eddb70b
CW
2508 reg = FDI_TX_CTL(pipe);
2509 temp = I915_READ(reg);
8db9d77b
ZW
2510 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2511 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2512 I915_WRITE(reg, temp);
2513
2514 POSTING_READ(reg);
8db9d77b
ZW
2515 udelay(500);
2516
fa37d39e
SP
2517 for (retry = 0; retry < 5; retry++) {
2518 reg = FDI_RX_IIR(pipe);
2519 temp = I915_READ(reg);
2520 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2521 if (temp & FDI_RX_BIT_LOCK) {
2522 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2523 DRM_DEBUG_KMS("FDI train 1 done.\n");
2524 break;
2525 }
2526 udelay(50);
8db9d77b 2527 }
fa37d39e
SP
2528 if (retry < 5)
2529 break;
8db9d77b
ZW
2530 }
2531 if (i == 4)
5eddb70b 2532 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2533
2534 /* Train 2 */
5eddb70b
CW
2535 reg = FDI_TX_CTL(pipe);
2536 temp = I915_READ(reg);
8db9d77b
ZW
2537 temp &= ~FDI_LINK_TRAIN_NONE;
2538 temp |= FDI_LINK_TRAIN_PATTERN_2;
2539 if (IS_GEN6(dev)) {
2540 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2541 /* SNB-B */
2542 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2543 }
5eddb70b 2544 I915_WRITE(reg, temp);
8db9d77b 2545
5eddb70b
CW
2546 reg = FDI_RX_CTL(pipe);
2547 temp = I915_READ(reg);
8db9d77b
ZW
2548 if (HAS_PCH_CPT(dev)) {
2549 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2550 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2551 } else {
2552 temp &= ~FDI_LINK_TRAIN_NONE;
2553 temp |= FDI_LINK_TRAIN_PATTERN_2;
2554 }
5eddb70b
CW
2555 I915_WRITE(reg, temp);
2556
2557 POSTING_READ(reg);
8db9d77b
ZW
2558 udelay(150);
2559
0206e353 2560 for (i = 0; i < 4; i++) {
5eddb70b
CW
2561 reg = FDI_TX_CTL(pipe);
2562 temp = I915_READ(reg);
8db9d77b
ZW
2563 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2564 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2565 I915_WRITE(reg, temp);
2566
2567 POSTING_READ(reg);
8db9d77b
ZW
2568 udelay(500);
2569
fa37d39e
SP
2570 for (retry = 0; retry < 5; retry++) {
2571 reg = FDI_RX_IIR(pipe);
2572 temp = I915_READ(reg);
2573 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2574 if (temp & FDI_RX_SYMBOL_LOCK) {
2575 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2576 DRM_DEBUG_KMS("FDI train 2 done.\n");
2577 break;
2578 }
2579 udelay(50);
8db9d77b 2580 }
fa37d39e
SP
2581 if (retry < 5)
2582 break;
8db9d77b
ZW
2583 }
2584 if (i == 4)
5eddb70b 2585 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2586
2587 DRM_DEBUG_KMS("FDI train done.\n");
2588}
2589
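/*
 * The SNB/IVB training loops above share one structure: try each of the four
 * voltage-swing/pre-emphasis settings from snb_b_fdi_train_param[], and at
 * each setting poll the RX status a few times for the lock bit, stopping at
 * the first setting that locks. The sketch below models only that nested
 * retry shape; link_locked() is an invented stub standing in for the
 * FDI_RX_IIR read, and the setting at which it "locks" is arbitrary.
 */
#include <stdio.h>

static int link_locked(int setting, int poll)
{
	return setting == 2 && poll >= 1;	/* pretend setting 2 works */
}

int main(void)
{
	int setting, poll, locked = 0;

	for (setting = 0; setting < 4 && !locked; setting++) {
		/* the driver writes snb_b_fdi_train_param[setting] here */
		for (poll = 0; poll < 5; poll++) {
			if (link_locked(setting, poll)) {
				printf("locked at setting %d, poll %d\n",
				       setting, poll);
				locked = 1;
				break;
			}
			/* the driver udelay()s between polls */
		}
	}
	if (!locked)
		printf("training failed\n");
	return 0;
}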
357555c0
JB
2590/* Manual link training for Ivy Bridge A0 parts */
2591static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2592{
2593 struct drm_device *dev = crtc->dev;
2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2596 int pipe = intel_crtc->pipe;
2597 u32 reg, temp, i;
2598
2599 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2600 for train result */
2601 reg = FDI_RX_IMR(pipe);
2602 temp = I915_READ(reg);
2603 temp &= ~FDI_RX_SYMBOL_LOCK;
2604 temp &= ~FDI_RX_BIT_LOCK;
2605 I915_WRITE(reg, temp);
2606
2607 POSTING_READ(reg);
2608 udelay(150);
2609
01a415fd
DV
2610 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2611 I915_READ(FDI_RX_IIR(pipe)));
2612
357555c0
JB
2613 /* enable CPU FDI TX and PCH FDI RX */
2614 reg = FDI_TX_CTL(pipe);
2615 temp = I915_READ(reg);
2616 temp &= ~(7 << 19);
2617 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2618 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2619 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2620 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2621 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
c4f9c4c2 2622 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2623 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2624
d74cf324
DV
2625 I915_WRITE(FDI_RX_MISC(pipe),
2626 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2627
357555c0
JB
2628 reg = FDI_RX_CTL(pipe);
2629 temp = I915_READ(reg);
2630 temp &= ~FDI_LINK_TRAIN_AUTO;
2631 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2632 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
c4f9c4c2 2633 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2634 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2635
2636 POSTING_READ(reg);
2637 udelay(150);
2638
8f5718a6 2639 cpt_phase_pointer_enable(dev, pipe);
291427f5 2640
0206e353 2641 for (i = 0; i < 4; i++) {
357555c0
JB
2642 reg = FDI_TX_CTL(pipe);
2643 temp = I915_READ(reg);
2644 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2645 temp |= snb_b_fdi_train_param[i];
2646 I915_WRITE(reg, temp);
2647
2648 POSTING_READ(reg);
2649 udelay(500);
2650
2651 reg = FDI_RX_IIR(pipe);
2652 temp = I915_READ(reg);
2653 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2654
2655 if (temp & FDI_RX_BIT_LOCK ||
2656 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2657 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
01a415fd 2658 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
357555c0
JB
2659 break;
2660 }
2661 }
2662 if (i == 4)
2663 DRM_ERROR("FDI train 1 fail!\n");
2664
2665 /* Train 2 */
2666 reg = FDI_TX_CTL(pipe);
2667 temp = I915_READ(reg);
2668 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2669 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2670 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2671 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2672 I915_WRITE(reg, temp);
2673
2674 reg = FDI_RX_CTL(pipe);
2675 temp = I915_READ(reg);
2676 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2677 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2678 I915_WRITE(reg, temp);
2679
2680 POSTING_READ(reg);
2681 udelay(150);
2682
0206e353 2683 for (i = 0; i < 4; i++) {
357555c0
JB
2684 reg = FDI_TX_CTL(pipe);
2685 temp = I915_READ(reg);
2686 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2687 temp |= snb_b_fdi_train_param[i];
2688 I915_WRITE(reg, temp);
2689
2690 POSTING_READ(reg);
2691 udelay(500);
2692
2693 reg = FDI_RX_IIR(pipe);
2694 temp = I915_READ(reg);
2695 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2696
2697 if (temp & FDI_RX_SYMBOL_LOCK) {
2698 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
01a415fd 2699 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
357555c0
JB
2700 break;
2701 }
2702 }
2703 if (i == 4)
2704 DRM_ERROR("FDI train 2 fail!\n");
2705
2706 DRM_DEBUG_KMS("FDI train done.\n");
2707}
2708
88cefb6c 2709static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2c07245f 2710{
88cefb6c 2711 struct drm_device *dev = intel_crtc->base.dev;
2c07245f 2712 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f 2713 int pipe = intel_crtc->pipe;
5eddb70b 2714 u32 reg, temp;
79e53945 2715
c64e311e 2716
c98e9dcf 2717 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
2718 reg = FDI_RX_CTL(pipe);
2719 temp = I915_READ(reg);
2720 temp &= ~((0x7 << 19) | (0x7 << 16));
c98e9dcf 2721 temp |= (intel_crtc->fdi_lanes - 1) << 19;
5eddb70b
CW
2722 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2723 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2724
2725 POSTING_READ(reg);
c98e9dcf
JB
2726 udelay(200);
2727
2728 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
2729 temp = I915_READ(reg);
2730 I915_WRITE(reg, temp | FDI_PCDCLK);
2731
2732 POSTING_READ(reg);
c98e9dcf
JB
2733 udelay(200);
2734
20749730
PZ
2735 /* Enable CPU FDI TX PLL, always on for Ironlake */
2736 reg = FDI_TX_CTL(pipe);
2737 temp = I915_READ(reg);
2738 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2739 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 2740
20749730
PZ
2741 POSTING_READ(reg);
2742 udelay(100);
6be4a607 2743 }
0e23b99d
JB
2744}
2745
88cefb6c
DV
2746static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2747{
2748 struct drm_device *dev = intel_crtc->base.dev;
2749 struct drm_i915_private *dev_priv = dev->dev_private;
2750 int pipe = intel_crtc->pipe;
2751 u32 reg, temp;
2752
2753 /* Switch from PCDclk to Rawclk */
2754 reg = FDI_RX_CTL(pipe);
2755 temp = I915_READ(reg);
2756 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2757
2758 /* Disable CPU FDI TX PLL */
2759 reg = FDI_TX_CTL(pipe);
2760 temp = I915_READ(reg);
2761 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2762
2763 POSTING_READ(reg);
2764 udelay(100);
2765
2766 reg = FDI_RX_CTL(pipe);
2767 temp = I915_READ(reg);
2768 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2769
2770 /* Wait for the clocks to turn off. */
2771 POSTING_READ(reg);
2772 udelay(100);
2773}
2774
291427f5
JB
2775static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2776{
2777 struct drm_i915_private *dev_priv = dev->dev_private;
2778 u32 flags = I915_READ(SOUTH_CHICKEN1);
2779
2780 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2781 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2782 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2783 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2784 POSTING_READ(SOUTH_CHICKEN1);
2785}
0fc932b8
JB
2786static void ironlake_fdi_disable(struct drm_crtc *crtc)
2787{
2788 struct drm_device *dev = crtc->dev;
2789 struct drm_i915_private *dev_priv = dev->dev_private;
2790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2791 int pipe = intel_crtc->pipe;
2792 u32 reg, temp;
2793
2794 /* disable CPU FDI tx and PCH FDI rx */
2795 reg = FDI_TX_CTL(pipe);
2796 temp = I915_READ(reg);
2797 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2798 POSTING_READ(reg);
2799
2800 reg = FDI_RX_CTL(pipe);
2801 temp = I915_READ(reg);
2802 temp &= ~(0x7 << 16);
2803 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2804 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2805
2806 POSTING_READ(reg);
2807 udelay(100);
2808
2809 /* Ironlake workaround, disable clock pointer after disabling FDI */
6f06ce18
JB
2810 if (HAS_PCH_IBX(dev)) {
2811 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
291427f5
JB
2812 } else if (HAS_PCH_CPT(dev)) {
2813 cpt_phase_pointer_disable(dev, pipe);
6f06ce18 2814 }
0fc932b8
JB
2815
2816 /* still set train pattern 1 */
2817 reg = FDI_TX_CTL(pipe);
2818 temp = I915_READ(reg);
2819 temp &= ~FDI_LINK_TRAIN_NONE;
2820 temp |= FDI_LINK_TRAIN_PATTERN_1;
2821 I915_WRITE(reg, temp);
2822
2823 reg = FDI_RX_CTL(pipe);
2824 temp = I915_READ(reg);
2825 if (HAS_PCH_CPT(dev)) {
2826 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2827 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2828 } else {
2829 temp &= ~FDI_LINK_TRAIN_NONE;
2830 temp |= FDI_LINK_TRAIN_PATTERN_1;
2831 }
2832 /* BPC in FDI rx is consistent with that in PIPECONF */
2833 temp &= ~(0x07 << 16);
2834 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2835 I915_WRITE(reg, temp);
2836
2837 POSTING_READ(reg);
2838 udelay(100);
2839}
2840
5bb61643
CW
2841static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2842{
2843 struct drm_device *dev = crtc->dev;
2844 struct drm_i915_private *dev_priv = dev->dev_private;
2845 unsigned long flags;
2846 bool pending;
2847
2848 if (atomic_read(&dev_priv->mm.wedged))
2849 return false;
2850
2851 spin_lock_irqsave(&dev->event_lock, flags);
2852 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2853 spin_unlock_irqrestore(&dev->event_lock, flags);
2854
2855 return pending;
2856}
2857
e6c3a2a6
CW
2858static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2859{
0f91128d 2860 struct drm_device *dev = crtc->dev;
5bb61643 2861 struct drm_i915_private *dev_priv = dev->dev_private;
e6c3a2a6
CW
2862
2863 if (crtc->fb == NULL)
2864 return;
2865
5bb61643
CW
2866 wait_event(dev_priv->pending_flip_queue,
2867 !intel_crtc_has_pending_flip(crtc));
2868
0f91128d
CW
2869 mutex_lock(&dev->struct_mutex);
2870 intel_finish_fb(crtc->fb);
2871 mutex_unlock(&dev->struct_mutex);
e6c3a2a6
CW
2872}
2873
fc316cbe 2874static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
040484af
JB
2875{
2876 struct drm_device *dev = crtc->dev;
228d3e36 2877 struct intel_encoder *intel_encoder;
040484af
JB
2878
2879 /*
2880 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2881 * must be driven by its own crtc; no sharing is possible.
2882 */
228d3e36 2883 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
228d3e36 2884 switch (intel_encoder->type) {
040484af 2885 case INTEL_OUTPUT_EDP:
228d3e36 2886 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
040484af
JB
2887 return false;
2888 continue;
2889 }
2890 }
2891
2892 return true;
2893}
2894
fc316cbe
PZ
2895static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2896{
2897 return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2898}
2899
e615efe4
ED
2900/* Program iCLKIP clock to the desired frequency */
2901static void lpt_program_iclkip(struct drm_crtc *crtc)
2902{
2903 struct drm_device *dev = crtc->dev;
2904 struct drm_i915_private *dev_priv = dev->dev_private;
2905 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2906 u32 temp;
2907
09153000
DV
2908 mutex_lock(&dev_priv->dpio_lock);
2909
e615efe4
ED
2910 /* It is necessary to ungate the pixclk gate prior to programming
2911 * the divisors, and gate it back when it is done.
2912 */
2913 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2914
2915 /* Disable SSCCTL */
2916 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2917 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2918 SBI_SSCCTL_DISABLE);
2919
2920 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2921 if (crtc->mode.clock == 20000) {
2922 auxdiv = 1;
2923 divsel = 0x41;
2924 phaseinc = 0x20;
2925 } else {
2926 /* The iCLK virtual clock root frequency is in MHz,
2927 * but crtc->mode.clock is in kHz. To get the divisors,
2928 * it is necessary to divide one by the other, so we
2929 * convert the virtual clock precision to kHz here for
2930 * higher precision.
2931 */
2932 u32 iclk_virtual_root_freq = 172800 * 1000;
2933 u32 iclk_pi_range = 64;
2934 u32 desired_divisor, msb_divisor_value, pi_value;
2935
2936 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2937 msb_divisor_value = desired_divisor / iclk_pi_range;
2938 pi_value = desired_divisor % iclk_pi_range;
2939
2940 auxdiv = 0;
2941 divsel = msb_divisor_value - 2;
2942 phaseinc = pi_value;
2943 }
2944
2945 /* This should not happen with any sane values */
2946 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2947 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2948 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2949 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2950
2951 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2952 crtc->mode.clock,
2953 auxdiv,
2954 divsel,
2955 phasedir,
2956 phaseinc);
2957
2958 /* Program SSCDIVINTPHASE6 */
2959 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2960 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2961 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2962 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2963 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2964 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2965 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2966
2967 intel_sbi_write(dev_priv,
2968 SBI_SSCDIVINTPHASE6,
2969 temp);
2970
2971 /* Program SSCAUXDIV */
2972 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2973 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2974 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2975 intel_sbi_write(dev_priv,
2976 SBI_SSCAUXDIV6,
2977 temp);
2978
2979
2980 /* Enable modulator and associated divider */
2981 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2982 temp &= ~SBI_SSCCTL_DISABLE;
2983 intel_sbi_write(dev_priv,
2984 SBI_SSCCTL6,
2985 temp);
2986
2987 /* Wait for initialization time */
2988 udelay(24);
2989
2990 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
09153000
DV
2991
2992 mutex_unlock(&dev_priv->dpio_lock);
e615efe4
ED
2993}
2994
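/*
 * The divisor derivation in lpt_program_iclkip() above is easier to follow
 * with numbers plugged in. The standalone program below redoes only the
 * arithmetic: the iCLK virtual root constant (172800 * 1000) is divided by
 * the pixel clock in kHz, and the integer result is split into a coarse
 * divider (minus 2) plus a 64-step phase increment, with 20 MHz handled as
 * the documented corner case. The sample clock values are arbitrary examples.
 */
#include <stdio.h>

static void iclkip_divisors(unsigned int clock_khz)
{
	unsigned int auxdiv, divsel, phaseinc;

	if (clock_khz == 20000) {	/* out of range for the 7-bit divisor */
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		unsigned int iclk_virtual_root_freq = 172800 * 1000;
		unsigned int iclk_pi_range = 64;
		unsigned int desired = iclk_virtual_root_freq / clock_khz;

		auxdiv = 0;
		divsel = desired / iclk_pi_range - 2;
		phaseinc = desired % iclk_pi_range;
	}

	printf("%u kHz: auxdiv=%u divsel=0x%x phaseinc=0x%x\n",
	       clock_khz, auxdiv, divsel, phaseinc);
}

int main(void)
{
	iclkip_divisors(148500);  /* 172800000/148500 = 1163 -> divsel 16, phaseinc 11 */
	iclkip_divisors(20000);   /* the corner case */
	return 0;
}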
f67a559d
JB
2995/*
2996 * Enable PCH resources required for PCH ports:
2997 * - PCH PLLs
2998 * - FDI training & RX/TX
2999 * - update transcoder timings
3000 * - DP transcoding bits
3001 * - transcoder
3002 */
3003static void ironlake_pch_enable(struct drm_crtc *crtc)
0e23b99d
JB
3004{
3005 struct drm_device *dev = crtc->dev;
3006 struct drm_i915_private *dev_priv = dev->dev_private;
3007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3008 int pipe = intel_crtc->pipe;
ee7b9f93 3009 u32 reg, temp;
2c07245f 3010
e7e164db
CW
3011 assert_transcoder_disabled(dev_priv, pipe);
3012
cd986abb
DV
3013 /* Write the TU size bits before fdi link training, so that error
3014 * detection works. */
3015 I915_WRITE(FDI_RX_TUSIZE1(pipe),
3016 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3017
c98e9dcf 3018 /* For PCH output, training FDI link */
674cf967 3019 dev_priv->display.fdi_link_train(crtc);
2c07245f 3020
572deb37
DV
3021 /* XXX: pch pll's can be enabled any time before we enable the PCH
3022 * transcoder, and we actually should do this to not upset any PCH
3023 * transcoder that already use the clock when we share it.
3024 *
3025 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3026 * unconditionally resets the pll - we need that to have the right LVDS
3027 * enable sequence. */
b6b4e185 3028 ironlake_enable_pch_pll(intel_crtc);
6f13b7b5 3029
303b81e0 3030 if (HAS_PCH_CPT(dev)) {
ee7b9f93 3031 u32 sel;
4b645f14 3032
c98e9dcf 3033 temp = I915_READ(PCH_DPLL_SEL);
ee7b9f93
JB
3034 switch (pipe) {
3035 default:
3036 case 0:
3037 temp |= TRANSA_DPLL_ENABLE;
3038 sel = TRANSA_DPLLB_SEL;
3039 break;
3040 case 1:
3041 temp |= TRANSB_DPLL_ENABLE;
3042 sel = TRANSB_DPLLB_SEL;
3043 break;
3044 case 2:
3045 temp |= TRANSC_DPLL_ENABLE;
3046 sel = TRANSC_DPLLB_SEL;
3047 break;
d64311ab 3048 }
ee7b9f93
JB
3049 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3050 temp |= sel;
3051 else
3052 temp &= ~sel;
c98e9dcf 3053 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 3054 }
5eddb70b 3055
d9b6cb56
JB
3056 /* set transcoder timing, panel must allow it */
3057 assert_panel_unlocked(dev_priv, pipe);
5eddb70b
CW
3058 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3059 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3060 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
8db9d77b 3061
5eddb70b
CW
3062 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3063 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3064 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
0529a0d9 3065 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
8db9d77b 3066
303b81e0 3067 intel_fdi_normal_train(crtc);
5e84e1a4 3068
c98e9dcf
JB
3069 /* For PCH DP, enable TRANS_DP_CTL */
3070 if (HAS_PCH_CPT(dev) &&
417e822d
KP
3071 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3072 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
9325c9f0 3073 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
5eddb70b
CW
3074 reg = TRANS_DP_CTL(pipe);
3075 temp = I915_READ(reg);
3076 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
3077 TRANS_DP_SYNC_MASK |
3078 TRANS_DP_BPC_MASK);
5eddb70b
CW
3079 temp |= (TRANS_DP_OUTPUT_ENABLE |
3080 TRANS_DP_ENH_FRAMING);
9325c9f0 3081 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf
JB
3082
3083 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 3084 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
c98e9dcf 3085 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 3086 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf
JB
3087
3088 switch (intel_trans_dp_port_sel(crtc)) {
3089 case PCH_DP_B:
5eddb70b 3090 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf
JB
3091 break;
3092 case PCH_DP_C:
5eddb70b 3093 temp |= TRANS_DP_PORT_SEL_C;
c98e9dcf
JB
3094 break;
3095 case PCH_DP_D:
5eddb70b 3096 temp |= TRANS_DP_PORT_SEL_D;
c98e9dcf
JB
3097 break;
3098 default:
e95d41e1 3099 BUG();
32f9d658 3100 }
2c07245f 3101
5eddb70b 3102 I915_WRITE(reg, temp);
6be4a607 3103 }
b52eb4dc 3104
b8a4f404 3105 ironlake_enable_pch_transcoder(dev_priv, pipe);
f67a559d
JB
3106}
3107
1507e5bd
PZ
3108static void lpt_pch_enable(struct drm_crtc *crtc)
3109{
3110 struct drm_device *dev = crtc->dev;
3111 struct drm_i915_private *dev_priv = dev->dev_private;
3112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
daed2dbb 3113 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1507e5bd 3114
daed2dbb 3115 assert_transcoder_disabled(dev_priv, TRANSCODER_A);
1507e5bd 3116
8c52b5e8 3117 lpt_program_iclkip(crtc);
1507e5bd 3118
0540e488 3119 /* Set transcoder timing. */
daed2dbb
PZ
3120 I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3121 I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3122 I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
1507e5bd 3123
daed2dbb
PZ
3124 I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3125 I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3126 I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
3127 I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
1507e5bd 3128
937bb610 3129 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
f67a559d
JB
3130}
3131
ee7b9f93
JB
3132static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3133{
3134 struct intel_pch_pll *pll = intel_crtc->pch_pll;
3135
3136 if (pll == NULL)
3137 return;
3138
3139 if (pll->refcount == 0) {
3140 WARN(1, "bad PCH PLL refcount\n");
3141 return;
3142 }
3143
3144 --pll->refcount;
3145 intel_crtc->pch_pll = NULL;
3146}
3147
3148static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3149{
3150 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3151 struct intel_pch_pll *pll;
3152 int i;
3153
3154 pll = intel_crtc->pch_pll;
3155 if (pll) {
3156 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3157 intel_crtc->base.base.id, pll->pll_reg);
3158 goto prepare;
3159 }
3160
98b6bd99
DV
3161 if (HAS_PCH_IBX(dev_priv->dev)) {
3162 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3163 i = intel_crtc->pipe;
3164 pll = &dev_priv->pch_plls[i];
3165
3166 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3167 intel_crtc->base.base.id, pll->pll_reg);
3168
3169 goto found;
3170 }
3171
ee7b9f93
JB
3172 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3173 pll = &dev_priv->pch_plls[i];
3174
3175 /* Only want to check enabled timings first */
3176 if (pll->refcount == 0)
3177 continue;
3178
3179 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3180 fp == I915_READ(pll->fp0_reg)) {
3181 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3182 intel_crtc->base.base.id,
3183 pll->pll_reg, pll->refcount, pll->active);
3184
3185 goto found;
3186 }
3187 }
3188
3189 /* Ok no matching timings, maybe there's a free one? */
3190 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3191 pll = &dev_priv->pch_plls[i];
3192 if (pll->refcount == 0) {
3193 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3194 intel_crtc->base.base.id, pll->pll_reg);
3195 goto found;
3196 }
3197 }
3198
3199 return NULL;
3200
3201found:
3202 intel_crtc->pch_pll = pll;
3203 pll->refcount++;
3204 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3205prepare: /* separate function? */
3206 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
ee7b9f93 3207
e04c7350
CW
3208 /* Wait for the clocks to stabilize before rewriting the regs */
3209 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3210 POSTING_READ(pll->pll_reg);
3211 udelay(150);
e04c7350
CW
3212
3213 I915_WRITE(pll->fp0_reg, fp);
3214 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3215 pll->on = false;
3216 return pll;
3217}
3218
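/*
 * intel_get_pch_pll() above is essentially a refcounted pool allocator:
 * prefer a PLL that is already in use with identical dpll/fp timings,
 * otherwise take a free one, and give up if neither exists. The sketch below
 * models only that allocation policy; struct pll, get_pll() and the pool
 * size are invented for illustration and carry none of the register
 * programming.
 */
#include <stdio.h>

struct pll {
	unsigned int dpll, fp;	/* programmed timings */
	int refcount;
};

static struct pll *get_pll(struct pll *pool, int n,
			   unsigned int dpll, unsigned int fp)
{
	int i;

	/* First pass: share an in-use PLL whose timings already match. */
	for (i = 0; i < n; i++)
		if (pool[i].refcount && pool[i].dpll == dpll && pool[i].fp == fp)
			goto found;
	/* Second pass: take any PLL that is currently unused. */
	for (i = 0; i < n; i++)
		if (pool[i].refcount == 0)
			goto found;
	return NULL;
found:
	pool[i].dpll = dpll;
	pool[i].fp = fp;
	pool[i].refcount++;
	return &pool[i];
}

int main(void)
{
	struct pll pool[2] = { { 0, 0, 0 }, { 0, 0, 0 } };
	struct pll *a = get_pll(pool, 2, 0x100, 0x20);	/* takes pll 0 */
	struct pll *b = get_pll(pool, 2, 0x100, 0x20);	/* shares pll 0 */
	struct pll *c = get_pll(pool, 2, 0x200, 0x30);	/* takes pll 1 */
	struct pll *d = get_pll(pool, 2, 0x300, 0x40);	/* pool exhausted */

	printf("a==b: %d, c is pll %ld, d: %p\n",
	       a == b, (long)(c - pool), (void *)d);
	return 0;
}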
d4270e57
JB
3219void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3220{
3221 struct drm_i915_private *dev_priv = dev->dev_private;
23670b32 3222 int dslreg = PIPEDSL(pipe);
d4270e57
JB
3223 u32 temp;
3224
3225 temp = I915_READ(dslreg);
3226 udelay(500);
3227 if (wait_for(I915_READ(dslreg) != temp, 5)) {
d4270e57
JB
3228 if (wait_for(I915_READ(dslreg) != temp, 5))
3229 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3230 }
3231}
3232
f67a559d
JB
3233static void ironlake_crtc_enable(struct drm_crtc *crtc)
3234{
3235 struct drm_device *dev = crtc->dev;
3236 struct drm_i915_private *dev_priv = dev->dev_private;
3237 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3238 struct intel_encoder *encoder;
f67a559d
JB
3239 int pipe = intel_crtc->pipe;
3240 int plane = intel_crtc->plane;
3241 u32 temp;
3242 bool is_pch_port;
3243
08a48469
DV
3244 WARN_ON(!crtc->enabled);
3245
f67a559d
JB
3246 if (intel_crtc->active)
3247 return;
3248
3249 intel_crtc->active = true;
3250 intel_update_watermarks(dev);
3251
3252 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3253 temp = I915_READ(PCH_LVDS);
3254 if ((temp & LVDS_PORT_EN) == 0)
3255 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3256 }
3257
fc316cbe 3258 is_pch_port = ironlake_crtc_driving_pch(crtc);
f67a559d 3259
46b6f814 3260 if (is_pch_port) {
fff367c7
DV
3261 /* Note: FDI PLL enabling _must_ be done before we enable the
3262 * cpu pipes, hence this is separate from all the other fdi/pch
3263 * enabling. */
88cefb6c 3264 ironlake_fdi_pll_enable(intel_crtc);
46b6f814
DV
3265 } else {
3266 assert_fdi_tx_disabled(dev_priv, pipe);
3267 assert_fdi_rx_disabled(dev_priv, pipe);
3268 }
f67a559d 3269
bf49ec8c
DV
3270 for_each_encoder_on_crtc(dev, crtc, encoder)
3271 if (encoder->pre_enable)
3272 encoder->pre_enable(encoder);
f67a559d
JB
3273
3274 /* Enable panel fitting for LVDS */
3275 if (dev_priv->pch_pf_size &&
547dc041
JN
3276 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3277 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
f67a559d
JB
3278 /* Force use of hard-coded filter coefficients
3279 * as some pre-programmed values are broken,
3280 * e.g. x201.
3281 */
13888d78
PZ
3282 if (IS_IVYBRIDGE(dev))
3283 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3284 PF_PIPE_SEL_IVB(pipe));
3285 else
3286 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
9db4a9c7
JB
3287 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3288 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
f67a559d
JB
3289 }
3290
9c54c0dd
JB
3291 /*
3292 * On ILK+ LUT must be loaded before the pipe is running but with
3293 * clocks enabled
3294 */
3295 intel_crtc_load_lut(crtc);
3296
f67a559d
JB
3297 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3298 intel_enable_plane(dev_priv, plane, pipe);
3299
3300 if (is_pch_port)
3301 ironlake_pch_enable(crtc);
c98e9dcf 3302
d1ebd816 3303 mutex_lock(&dev->struct_mutex);
bed4a673 3304 intel_update_fbc(dev);
d1ebd816
BW
3305 mutex_unlock(&dev->struct_mutex);
3306
6b383a7f 3307 intel_crtc_update_cursor(crtc, true);
ef9c3aee 3308
fa5c73b1
DV
3309 for_each_encoder_on_crtc(dev, crtc, encoder)
3310 encoder->enable(encoder);
61b77ddd
DV
3311
3312 if (HAS_PCH_CPT(dev))
3313 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
6ce94100
DV
3314
3315 /*
3316 * There seems to be a race in PCH platform hw (at least on some
3317 * outputs) where an enabled pipe still completes any pageflip right
3318 * away (as if the pipe is off) instead of waiting for vblank. As soon
3319 * as the first vblank happened, everything works as expected. Hence just
3320 * wait for one vblank before returning to avoid strange things
3321 * happening.
3322 */
3323 intel_wait_for_vblank(dev, intel_crtc->pipe);
6be4a607
JB
3324}
3325
4f771f10
PZ
3326static void haswell_crtc_enable(struct drm_crtc *crtc)
3327{
3328 struct drm_device *dev = crtc->dev;
3329 struct drm_i915_private *dev_priv = dev->dev_private;
3330 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3331 struct intel_encoder *encoder;
3332 int pipe = intel_crtc->pipe;
3333 int plane = intel_crtc->plane;
4f771f10
PZ
3334 bool is_pch_port;
3335
3336 WARN_ON(!crtc->enabled);
3337
3338 if (intel_crtc->active)
3339 return;
3340
3341 intel_crtc->active = true;
3342 intel_update_watermarks(dev);
3343
fc316cbe 3344 is_pch_port = haswell_crtc_driving_pch(crtc);
4f771f10 3345
83616634 3346 if (is_pch_port)
04945641 3347 dev_priv->display.fdi_link_train(crtc);
4f771f10
PZ
3348
3349 for_each_encoder_on_crtc(dev, crtc, encoder)
3350 if (encoder->pre_enable)
3351 encoder->pre_enable(encoder);
3352
1f544388 3353 intel_ddi_enable_pipe_clock(intel_crtc);
4f771f10 3354
1f544388 3355 /* Enable panel fitting for eDP */
547dc041
JN
3356 if (dev_priv->pch_pf_size &&
3357 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4f771f10
PZ
3358 /* Force use of hard-coded filter coefficients
3359 * as some pre-programmed values are broken,
3360 * e.g. x201.
3361 */
54075a7d
PZ
3362 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3363 PF_PIPE_SEL_IVB(pipe));
4f771f10
PZ
3364 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3365 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3366 }
3367
3368 /*
3369 * On ILK+ LUT must be loaded before the pipe is running but with
3370 * clocks enabled
3371 */
3372 intel_crtc_load_lut(crtc);
3373
1f544388
PZ
3374 intel_ddi_set_pipe_settings(crtc);
3375 intel_ddi_enable_pipe_func(crtc);
4f771f10
PZ
3376
3377 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3378 intel_enable_plane(dev_priv, plane, pipe);
3379
3380 if (is_pch_port)
1507e5bd 3381 lpt_pch_enable(crtc);
4f771f10
PZ
3382
3383 mutex_lock(&dev->struct_mutex);
3384 intel_update_fbc(dev);
3385 mutex_unlock(&dev->struct_mutex);
3386
3387 intel_crtc_update_cursor(crtc, true);
3388
3389 for_each_encoder_on_crtc(dev, crtc, encoder)
3390 encoder->enable(encoder);
3391
4f771f10
PZ
3392 /*
3393 * There seems to be a race in PCH platform hw (at least on some
3394 * outputs) where an enabled pipe still completes any pageflip right
3395 * away (as if the pipe is off) instead of waiting for vblank. As soon
3396	 * as the first vblank happened, everything works as expected. Hence just
3397 * wait for one vblank before returning to avoid strange things
3398 * happening.
3399 */
3400 intel_wait_for_vblank(dev, intel_crtc->pipe);
3401}
3402
6be4a607
JB
3403static void ironlake_crtc_disable(struct drm_crtc *crtc)
3404{
3405 struct drm_device *dev = crtc->dev;
3406 struct drm_i915_private *dev_priv = dev->dev_private;
3407 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3408 struct intel_encoder *encoder;
6be4a607
JB
3409 int pipe = intel_crtc->pipe;
3410 int plane = intel_crtc->plane;
5eddb70b 3411 u32 reg, temp;
b52eb4dc 3412
ef9c3aee 3413
f7abfe8b
CW
3414 if (!intel_crtc->active)
3415 return;
3416
ea9d758d
DV
3417 for_each_encoder_on_crtc(dev, crtc, encoder)
3418 encoder->disable(encoder);
3419
e6c3a2a6 3420 intel_crtc_wait_for_pending_flips(crtc);
6be4a607 3421 drm_vblank_off(dev, pipe);
6b383a7f 3422 intel_crtc_update_cursor(crtc, false);
5eddb70b 3423
b24e7179 3424 intel_disable_plane(dev_priv, plane, pipe);
913d8d11 3425
973d04f9
CW
3426 if (dev_priv->cfb_plane == plane)
3427 intel_disable_fbc(dev);
2c07245f 3428
b24e7179 3429 intel_disable_pipe(dev_priv, pipe);
32f9d658 3430
6be4a607 3431 /* Disable PF */
9db4a9c7
JB
3432 I915_WRITE(PF_CTL(pipe), 0);
3433 I915_WRITE(PF_WIN_SZ(pipe), 0);
2c07245f 3434
bf49ec8c
DV
3435 for_each_encoder_on_crtc(dev, crtc, encoder)
3436 if (encoder->post_disable)
3437 encoder->post_disable(encoder);
2c07245f 3438
0fc932b8 3439 ironlake_fdi_disable(crtc);
249c0e64 3440
b8a4f404 3441 ironlake_disable_pch_transcoder(dev_priv, pipe);
913d8d11 3442
6be4a607
JB
3443 if (HAS_PCH_CPT(dev)) {
3444 /* disable TRANS_DP_CTL */
5eddb70b
CW
3445 reg = TRANS_DP_CTL(pipe);
3446 temp = I915_READ(reg);
3447 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
cb3543c6 3448 temp |= TRANS_DP_PORT_SEL_NONE;
5eddb70b 3449 I915_WRITE(reg, temp);
6be4a607
JB
3450
3451 /* disable DPLL_SEL */
3452 temp = I915_READ(PCH_DPLL_SEL);
9db4a9c7
JB
3453 switch (pipe) {
3454 case 0:
d64311ab 3455 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
9db4a9c7
JB
3456 break;
3457 case 1:
6be4a607 3458 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
9db4a9c7
JB
3459 break;
3460 case 2:
4b645f14 3461 /* C shares PLL A or B */
d64311ab 3462 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
9db4a9c7
JB
3463 break;
3464 default:
3465 BUG(); /* wtf */
3466 }
6be4a607 3467 I915_WRITE(PCH_DPLL_SEL, temp);
6be4a607 3468 }
e3421a18 3469
6be4a607 3470 /* disable PCH DPLL */
ee7b9f93 3471 intel_disable_pch_pll(intel_crtc);
8db9d77b 3472
88cefb6c 3473 ironlake_fdi_pll_disable(intel_crtc);
6b383a7f 3474
f7abfe8b 3475 intel_crtc->active = false;
6b383a7f 3476 intel_update_watermarks(dev);
d1ebd816
BW
3477
3478 mutex_lock(&dev->struct_mutex);
6b383a7f 3479 intel_update_fbc(dev);
d1ebd816 3480 mutex_unlock(&dev->struct_mutex);
6be4a607 3481}
1b3c7a47 3482
4f771f10 3483static void haswell_crtc_disable(struct drm_crtc *crtc)
ee7b9f93 3484{
4f771f10
PZ
3485 struct drm_device *dev = crtc->dev;
3486 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93 3487 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4f771f10
PZ
3488 struct intel_encoder *encoder;
3489 int pipe = intel_crtc->pipe;
3490 int plane = intel_crtc->plane;
ad80a810 3491 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
83616634 3492 bool is_pch_port;
ee7b9f93 3493
4f771f10
PZ
3494 if (!intel_crtc->active)
3495 return;
3496
83616634
PZ
3497 is_pch_port = haswell_crtc_driving_pch(crtc);
3498
4f771f10
PZ
3499 for_each_encoder_on_crtc(dev, crtc, encoder)
3500 encoder->disable(encoder);
3501
3502 intel_crtc_wait_for_pending_flips(crtc);
3503 drm_vblank_off(dev, pipe);
3504 intel_crtc_update_cursor(crtc, false);
3505
3506 intel_disable_plane(dev_priv, plane, pipe);
3507
3508 if (dev_priv->cfb_plane == plane)
3509 intel_disable_fbc(dev);
3510
3511 intel_disable_pipe(dev_priv, pipe);
3512
ad80a810 3513 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4f771f10
PZ
3514
3515 /* Disable PF */
3516 I915_WRITE(PF_CTL(pipe), 0);
3517 I915_WRITE(PF_WIN_SZ(pipe), 0);
3518
1f544388 3519 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10
PZ
3520
3521 for_each_encoder_on_crtc(dev, crtc, encoder)
3522 if (encoder->post_disable)
3523 encoder->post_disable(encoder);
3524
83616634 3525 if (is_pch_port) {
ab4d966c 3526 lpt_disable_pch_transcoder(dev_priv);
1ad960f2 3527 intel_ddi_fdi_disable(crtc);
83616634 3528 }
4f771f10
PZ
3529
3530 intel_crtc->active = false;
3531 intel_update_watermarks(dev);
3532
3533 mutex_lock(&dev->struct_mutex);
3534 intel_update_fbc(dev);
3535 mutex_unlock(&dev->struct_mutex);
3536}
3537
ee7b9f93
JB
3538static void ironlake_crtc_off(struct drm_crtc *crtc)
3539{
3540 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3541 intel_put_pch_pll(intel_crtc);
3542}
3543
6441ab5f
PZ
3544static void haswell_crtc_off(struct drm_crtc *crtc)
3545{
a5c961d1
PZ
3546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3547
3548 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3549 * start using it. */
1a240d4d 3550 intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
a5c961d1 3551
6441ab5f
PZ
3552 intel_ddi_put_crtc_pll(crtc);
3553}
3554
02e792fb
DV
3555static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3556{
02e792fb 3557 if (!enable && intel_crtc->overlay) {
23f09ce3 3558 struct drm_device *dev = intel_crtc->base.dev;
ce453d81 3559 struct drm_i915_private *dev_priv = dev->dev_private;
03f77ea5 3560
23f09ce3 3561 mutex_lock(&dev->struct_mutex);
ce453d81
CW
3562 dev_priv->mm.interruptible = false;
3563 (void) intel_overlay_switch_off(intel_crtc->overlay);
3564 dev_priv->mm.interruptible = true;
23f09ce3 3565 mutex_unlock(&dev->struct_mutex);
02e792fb 3566 }
02e792fb 3567
5dcdbcb0
CW
3568 /* Let userspace switch the overlay on again. In most cases userspace
3569 * has to recompute where to put it anyway.
3570 */
02e792fb
DV
3571}
3572
0b8765c6 3573static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
3574{
3575 struct drm_device *dev = crtc->dev;
79e53945
JB
3576 struct drm_i915_private *dev_priv = dev->dev_private;
3577 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3578 struct intel_encoder *encoder;
79e53945 3579 int pipe = intel_crtc->pipe;
80824003 3580 int plane = intel_crtc->plane;
79e53945 3581
08a48469
DV
3582 WARN_ON(!crtc->enabled);
3583
f7abfe8b
CW
3584 if (intel_crtc->active)
3585 return;
3586
3587 intel_crtc->active = true;
6b383a7f
CW
3588 intel_update_watermarks(dev);
3589
63d7bbe9 3590 intel_enable_pll(dev_priv, pipe);
040484af 3591 intel_enable_pipe(dev_priv, pipe, false);
b24e7179 3592 intel_enable_plane(dev_priv, plane, pipe);
79e53945 3593
0b8765c6 3594 intel_crtc_load_lut(crtc);
bed4a673 3595 intel_update_fbc(dev);
79e53945 3596
0b8765c6
JB
3597 /* Give the overlay scaler a chance to enable if it's on this pipe */
3598 intel_crtc_dpms_overlay(intel_crtc, true);
6b383a7f 3599 intel_crtc_update_cursor(crtc, true);
ef9c3aee 3600
fa5c73b1
DV
3601 for_each_encoder_on_crtc(dev, crtc, encoder)
3602 encoder->enable(encoder);
0b8765c6 3603}
79e53945 3604
0b8765c6
JB
3605static void i9xx_crtc_disable(struct drm_crtc *crtc)
3606{
3607 struct drm_device *dev = crtc->dev;
3608 struct drm_i915_private *dev_priv = dev->dev_private;
3609 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3610 struct intel_encoder *encoder;
0b8765c6
JB
3611 int pipe = intel_crtc->pipe;
3612 int plane = intel_crtc->plane;
b690e96c 3613
ef9c3aee 3614
f7abfe8b
CW
3615 if (!intel_crtc->active)
3616 return;
3617
ea9d758d
DV
3618 for_each_encoder_on_crtc(dev, crtc, encoder)
3619 encoder->disable(encoder);
3620
0b8765c6 3621 /* Give the overlay scaler a chance to disable if it's on this pipe */
e6c3a2a6
CW
3622 intel_crtc_wait_for_pending_flips(crtc);
3623 drm_vblank_off(dev, pipe);
0b8765c6 3624 intel_crtc_dpms_overlay(intel_crtc, false);
6b383a7f 3625 intel_crtc_update_cursor(crtc, false);
0b8765c6 3626
973d04f9
CW
3627 if (dev_priv->cfb_plane == plane)
3628 intel_disable_fbc(dev);
79e53945 3629
b24e7179 3630 intel_disable_plane(dev_priv, plane, pipe);
b24e7179 3631 intel_disable_pipe(dev_priv, pipe);
63d7bbe9 3632 intel_disable_pll(dev_priv, pipe);
0b8765c6 3633
f7abfe8b 3634 intel_crtc->active = false;
6b383a7f
CW
3635 intel_update_fbc(dev);
3636 intel_update_watermarks(dev);
0b8765c6
JB
3637}
3638
ee7b9f93
JB
3639static void i9xx_crtc_off(struct drm_crtc *crtc)
3640{
3641}
3642
976f8a20
DV
3643static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3644 bool enabled)
2c07245f
ZW
3645{
3646 struct drm_device *dev = crtc->dev;
3647 struct drm_i915_master_private *master_priv;
3648 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3649 int pipe = intel_crtc->pipe;
79e53945
JB
3650
3651 if (!dev->primary->master)
3652 return;
3653
3654 master_priv = dev->primary->master->driver_priv;
3655 if (!master_priv->sarea_priv)
3656 return;
3657
79e53945
JB
3658 switch (pipe) {
3659 case 0:
3660 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3661 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3662 break;
3663 case 1:
3664 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3665 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3666 break;
3667 default:
9db4a9c7 3668 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
79e53945
JB
3669 break;
3670 }
79e53945
JB
3671}
3672
976f8a20
DV
3673/**
3674 * Sets the power management mode of the pipe and plane.
3675 */
3676void intel_crtc_update_dpms(struct drm_crtc *crtc)
3677{
3678 struct drm_device *dev = crtc->dev;
3679 struct drm_i915_private *dev_priv = dev->dev_private;
3680 struct intel_encoder *intel_encoder;
3681 bool enable = false;
3682
3683 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3684 enable |= intel_encoder->connectors_active;
3685
3686 if (enable)
3687 dev_priv->display.crtc_enable(crtc);
3688 else
3689 dev_priv->display.crtc_disable(crtc);
3690
3691 intel_crtc_update_sarea(crtc, enable);
3692}
3693
3694static void intel_crtc_noop(struct drm_crtc *crtc)
3695{
3696}
3697
cdd59983
CW
3698static void intel_crtc_disable(struct drm_crtc *crtc)
3699{
cdd59983 3700 struct drm_device *dev = crtc->dev;
976f8a20 3701 struct drm_connector *connector;
ee7b9f93 3702 struct drm_i915_private *dev_priv = dev->dev_private;
cdd59983 3703
976f8a20
DV
3704 /* crtc should still be enabled when we disable it. */
3705 WARN_ON(!crtc->enabled);
3706
3707 dev_priv->display.crtc_disable(crtc);
3708 intel_crtc_update_sarea(crtc, false);
ee7b9f93
JB
3709 dev_priv->display.off(crtc);
3710
931872fc
CW
3711 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3712 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
cdd59983
CW
3713
3714 if (crtc->fb) {
3715 mutex_lock(&dev->struct_mutex);
1690e1eb 3716 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
cdd59983 3717 mutex_unlock(&dev->struct_mutex);
976f8a20
DV
3718 crtc->fb = NULL;
3719 }
3720
3721 /* Update computed state. */
3722 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3723 if (!connector->encoder || !connector->encoder->crtc)
3724 continue;
3725
3726 if (connector->encoder->crtc != crtc)
3727 continue;
3728
3729 connector->dpms = DRM_MODE_DPMS_OFF;
3730 to_intel_encoder(connector->encoder)->connectors_active = false;
cdd59983
CW
3731 }
3732}
3733
a261b246 3734void intel_modeset_disable(struct drm_device *dev)
79e53945 3735{
a261b246
DV
3736 struct drm_crtc *crtc;
3737
3738 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3739 if (crtc->enabled)
3740 intel_crtc_disable(crtc);
3741 }
79e53945
JB
3742}
3743
1f703855 3744void intel_encoder_noop(struct drm_encoder *encoder)
79e53945 3745{
7e7d76c3
JB
3746}
3747
ea5b213a 3748void intel_encoder_destroy(struct drm_encoder *encoder)
7e7d76c3 3749{
4ef69c7a 3750 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 3751
ea5b213a
CW
3752 drm_encoder_cleanup(encoder);
3753 kfree(intel_encoder);
7e7d76c3
JB
3754}
3755
5ab432ef
DV
3756	/* Simple dpms helper for encoders with just one connector, no cloning and only
3757 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3758 * state of the entire output pipe. */
3759void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
7e7d76c3 3760{
5ab432ef
DV
3761 if (mode == DRM_MODE_DPMS_ON) {
3762 encoder->connectors_active = true;
3763
b2cabb0e 3764 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef
DV
3765 } else {
3766 encoder->connectors_active = false;
3767
b2cabb0e 3768 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef 3769 }
79e53945
JB
3770}
3771
0a91ca29
DV
3772	/* Cross check the actual hw state with our own modeset state tracking (and its
3773 * internal consistency). */
b980514c 3774static void intel_connector_check_state(struct intel_connector *connector)
79e53945 3775{
0a91ca29
DV
3776 if (connector->get_hw_state(connector)) {
3777 struct intel_encoder *encoder = connector->encoder;
3778 struct drm_crtc *crtc;
3779 bool encoder_enabled;
3780 enum pipe pipe;
3781
3782 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3783 connector->base.base.id,
3784 drm_get_connector_name(&connector->base));
3785
3786 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3787 "wrong connector dpms state\n");
3788 WARN(connector->base.encoder != &encoder->base,
3789 "active connector not linked to encoder\n");
3790 WARN(!encoder->connectors_active,
3791 "encoder->connectors_active not set\n");
3792
3793 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3794 WARN(!encoder_enabled, "encoder not enabled\n");
3795 if (WARN_ON(!encoder->base.crtc))
3796 return;
3797
3798 crtc = encoder->base.crtc;
3799
3800 WARN(!crtc->enabled, "crtc not enabled\n");
3801 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3802 WARN(pipe != to_intel_crtc(crtc)->pipe,
3803 "encoder active on the wrong pipe\n");
3804 }
79e53945
JB
3805}
3806
5ab432ef
DV
3807/* Even simpler default implementation, if there's really no special case to
3808 * consider. */
3809void intel_connector_dpms(struct drm_connector *connector, int mode)
79e53945 3810{
5ab432ef 3811 struct intel_encoder *encoder = intel_attached_encoder(connector);
d4270e57 3812
5ab432ef
DV
3813 /* All the simple cases only support two dpms states. */
3814 if (mode != DRM_MODE_DPMS_ON)
3815 mode = DRM_MODE_DPMS_OFF;
d4270e57 3816
5ab432ef
DV
3817 if (mode == connector->dpms)
3818 return;
3819
3820 connector->dpms = mode;
3821
3822 /* Only need to change hw state when actually enabled */
3823 if (encoder->base.crtc)
3824 intel_encoder_dpms(encoder, mode);
3825 else
8af6cf88 3826 WARN_ON(encoder->connectors_active != false);
0a91ca29 3827
b980514c 3828 intel_modeset_check_state(connector->dev);
79e53945
JB
3829}
3830
f0947c37
DV
3831/* Simple connector->get_hw_state implementation for encoders that support only
3832 * one connector and no cloning and hence the encoder state determines the state
3833 * of the connector. */
3834bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 3835{
24929352 3836 enum pipe pipe = 0;
f0947c37 3837 struct intel_encoder *encoder = connector->encoder;
ea5b213a 3838
f0947c37 3839 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
3840}
3841
79e53945 3842static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
35313cde 3843 const struct drm_display_mode *mode,
79e53945
JB
3844 struct drm_display_mode *adjusted_mode)
3845{
2c07245f 3846 struct drm_device *dev = crtc->dev;
89749350 3847
bad720ff 3848 if (HAS_PCH_SPLIT(dev)) {
2c07245f 3849 /* FDI link clock is fixed at 2.7G */
2377b741
JB
3850 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3851 return false;
2c07245f 3852 }
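	/* Worked out from the constants above: with IRONLAKE_FDI_FREQ at
	 * 2700000 kHz this check rejects any mode whose pixel clock exceeds
	 * 2700000 * 4 / 3 = 3600000 kHz on PCH-split hardware. */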
89749350 3853
f9bef081
DV
3854 /* All interlaced capable intel hw wants timings in frames. Note though
3855 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3856 * timings, so we need to be careful not to clobber these.*/
3857 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3858 drm_mode_set_crtcinfo(adjusted_mode, 0);
89749350 3859
44f46b42
CW
3860 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3861 * with a hsync front porch of 0.
3862 */
3863 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
3864 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
3865 return false;
3866
79e53945
JB
3867 return true;
3868}
3869
25eb05fc
JB
3870static int valleyview_get_display_clock_speed(struct drm_device *dev)
3871{
3872 return 400000; /* FIXME */
3873}
3874
e70236a8
JB
3875static int i945_get_display_clock_speed(struct drm_device *dev)
3876{
3877 return 400000;
3878}
79e53945 3879
e70236a8 3880static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 3881{
e70236a8
JB
3882 return 333000;
3883}
79e53945 3884
e70236a8
JB
3885static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3886{
3887 return 200000;
3888}
79e53945 3889
e70236a8
JB
3890static int i915gm_get_display_clock_speed(struct drm_device *dev)
3891{
3892 u16 gcfgc = 0;
79e53945 3893
e70236a8
JB
3894 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3895
3896 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3897 return 133000;
3898 else {
3899 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3900 case GC_DISPLAY_CLOCK_333_MHZ:
3901 return 333000;
3902 default:
3903 case GC_DISPLAY_CLOCK_190_200_MHZ:
3904 return 190000;
79e53945 3905 }
e70236a8
JB
3906 }
3907}
3908
3909static int i865_get_display_clock_speed(struct drm_device *dev)
3910{
3911 return 266000;
3912}
3913
3914static int i855_get_display_clock_speed(struct drm_device *dev)
3915{
3916 u16 hpllcc = 0;
3917 /* Assume that the hardware is in the high speed state. This
3918 * should be the default.
3919 */
3920 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3921 case GC_CLOCK_133_200:
3922 case GC_CLOCK_100_200:
3923 return 200000;
3924 case GC_CLOCK_166_250:
3925 return 250000;
3926 case GC_CLOCK_100_133:
79e53945 3927 return 133000;
e70236a8 3928 }
79e53945 3929
e70236a8
JB
3930 /* Shouldn't happen */
3931 return 0;
3932}
79e53945 3933
e70236a8
JB
3934static int i830_get_display_clock_speed(struct drm_device *dev)
3935{
3936 return 133000;
79e53945
JB
3937}
3938
2c07245f 3939static void
e69d0bc1 3940intel_reduce_ratio(uint32_t *num, uint32_t *den)
2c07245f
ZW
3941{
3942 while (*num > 0xffffff || *den > 0xffffff) {
3943 *num >>= 1;
3944 *den >>= 1;
3945 }
3946}
3947
e69d0bc1
DV
3948void
3949intel_link_compute_m_n(int bits_per_pixel, int nlanes,
3950 int pixel_clock, int link_clock,
3951 struct intel_link_m_n *m_n)
2c07245f 3952{
e69d0bc1 3953 m_n->tu = 64;
22ed1113
CW
3954 m_n->gmch_m = bits_per_pixel * pixel_clock;
3955 m_n->gmch_n = link_clock * nlanes * 8;
e69d0bc1 3956 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
22ed1113
CW
3957 m_n->link_m = pixel_clock;
3958 m_n->link_n = link_clock;
e69d0bc1 3959 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
2c07245f
ZW
3960}
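/* Worked example with hypothetical numbers: a 148500 kHz pixel clock at
 * 24 bpp over a 4-lane 270000 kHz link gives
 *   gmch_m = 24 * 148500    = 3564000
 *   gmch_n = 270000 * 4 * 8 = 8640000
 *   link_m = 148500, link_n = 270000
 * all of which already fit in 24 bits, so intel_reduce_ratio() above only
 * halves both terms of a ratio when one of them exceeds 0xffffff. */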
3961
a7615030
CW
3962static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3963{
72bbe58c
KP
3964 if (i915_panel_use_ssc >= 0)
3965 return i915_panel_use_ssc != 0;
3966 return dev_priv->lvds_use_ssc
435793df 3967 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
3968}
3969
5a354204
JB
3970/**
3971 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3972 * @crtc: CRTC structure
3b5c78a3 3973 * @mode: requested mode
5a354204
JB
3974 *
3975 * A pipe may be connected to one or more outputs. Based on the depth of the
3976 * attached framebuffer, choose a good color depth to use on the pipe.
3977 *
3978 * If possible, match the pipe depth to the fb depth. In some cases, this
3979 * isn't ideal, because the connected output supports a lesser or restricted
3980 * set of depths. Resolve that here:
3981 * LVDS typically supports only 6bpc, so clamp down in that case
3982 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3983 * Displays may support a restricted set as well, check EDID and clamp as
3984 * appropriate.
3b5c78a3 3985 * DP may want to dither down to 6bpc to fit larger modes
5a354204
JB
3986 *
3987 * RETURNS:
3988 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3989 * true if they don't match).
3990 */
3991static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
94352cf9 3992 struct drm_framebuffer *fb,
3b5c78a3
AJ
3993 unsigned int *pipe_bpp,
3994 struct drm_display_mode *mode)
5a354204
JB
3995{
3996 struct drm_device *dev = crtc->dev;
3997 struct drm_i915_private *dev_priv = dev->dev_private;
5a354204 3998 struct drm_connector *connector;
6c2b7c12 3999 struct intel_encoder *intel_encoder;
5a354204
JB
4000 unsigned int display_bpc = UINT_MAX, bpc;
4001
4002 /* Walk the encoders & connectors on this crtc, get min bpc */
6c2b7c12 4003 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5a354204
JB
4004
4005 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4006 unsigned int lvds_bpc;
4007
4008 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4009 LVDS_A3_POWER_UP)
4010 lvds_bpc = 8;
4011 else
4012 lvds_bpc = 6;
4013
4014 if (lvds_bpc < display_bpc) {
82820490 4015 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5a354204
JB
4016 display_bpc = lvds_bpc;
4017 }
4018 continue;
4019 }
4020
5a354204
JB
4021 /* Not one of the known troublemakers, check the EDID */
4022 list_for_each_entry(connector, &dev->mode_config.connector_list,
4023 head) {
6c2b7c12 4024 if (connector->encoder != &intel_encoder->base)
5a354204
JB
4025 continue;
4026
62ac41a6
JB
4027 /* Don't use an invalid EDID bpc value */
4028 if (connector->display_info.bpc &&
4029 connector->display_info.bpc < display_bpc) {
82820490 4030 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5a354204
JB
4031 display_bpc = connector->display_info.bpc;
4032 }
4033 }
4034
4035 /*
4036 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4037 * through, clamp it down. (Note: >12bpc will be caught below.)
4038 */
4039 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4040 if (display_bpc > 8 && display_bpc < 12) {
82820490 4041 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5a354204
JB
4042 display_bpc = 12;
4043 } else {
82820490 4044 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5a354204
JB
4045 display_bpc = 8;
4046 }
4047 }
4048 }
4049
3b5c78a3
AJ
4050 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4051 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4052 display_bpc = 6;
4053 }
4054
5a354204
JB
4055 /*
4056 * We could just drive the pipe at the highest bpc all the time and
4057 * enable dithering as needed, but that costs bandwidth. So choose
4058 * the minimum value that expresses the full color range of the fb but
4059 * also stays within the max display bpc discovered above.
4060 */
4061
94352cf9 4062 switch (fb->depth) {
5a354204
JB
4063 case 8:
4064 bpc = 8; /* since we go through a colormap */
4065 break;
4066 case 15:
4067 case 16:
4068 bpc = 6; /* min is 18bpp */
4069 break;
4070 case 24:
578393cd 4071 bpc = 8;
5a354204
JB
4072 break;
4073 case 30:
578393cd 4074 bpc = 10;
5a354204
JB
4075 break;
4076 case 48:
578393cd 4077 bpc = 12;
5a354204
JB
4078 break;
4079 default:
4080 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4081 bpc = min((unsigned int)8, display_bpc);
4082 break;
4083 }
4084
578393cd
KP
4085 display_bpc = min(display_bpc, bpc);
4086
82820490
AJ
4087 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4088 bpc, display_bpc);
5a354204 4089
578393cd 4090 *pipe_bpp = display_bpc * 3;
5a354204
JB
4091
4092 return display_bpc != bpc;
4093}
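/* Illustrative walk-through (hypothetical setup): a 24-bit framebuffer
 * selects bpc = 8 in the switch above; if an LVDS panel on this pipe was
 * reported as 6 bpc, display_bpc is clamped to 6, *pipe_bpp ends up as 18,
 * and the function returns true, i.e. dithering is required. */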
4094
a0c4da24
JB
4095static int vlv_get_refclk(struct drm_crtc *crtc)
4096{
4097 struct drm_device *dev = crtc->dev;
4098 struct drm_i915_private *dev_priv = dev->dev_private;
4099 int refclk = 27000; /* for DP & HDMI */
4100
4101 return 100000; /* only one validated so far */
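	/* Note: the early return above deliberately short-circuits the
	 * per-output selection that follows (only the 100 MHz reference has
	 * been validated on VLV so far), so the code below is currently
	 * unreachable. */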
4102
4103 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4104 refclk = 96000;
4105 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4106 if (intel_panel_use_ssc(dev_priv))
4107 refclk = 100000;
4108 else
4109 refclk = 96000;
4110 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4111 refclk = 100000;
4112 }
4113
4114 return refclk;
4115}
4116
c65d77d8
JB
4117static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4118{
4119 struct drm_device *dev = crtc->dev;
4120 struct drm_i915_private *dev_priv = dev->dev_private;
4121 int refclk;
4122
a0c4da24
JB
4123 if (IS_VALLEYVIEW(dev)) {
4124 refclk = vlv_get_refclk(crtc);
4125 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
c65d77d8
JB
4126 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4127 refclk = dev_priv->lvds_ssc_freq * 1000;
4128 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4129 refclk / 1000);
4130 } else if (!IS_GEN2(dev)) {
4131 refclk = 96000;
4132 } else {
4133 refclk = 48000;
4134 }
4135
4136 return refclk;
4137}
4138
4139static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
4140 intel_clock_t *clock)
4141{
4142	 /* SDVO TV has fixed PLL values that depend on its clock range;
4143	    this mirrors the VBIOS setting. */
4144 if (adjusted_mode->clock >= 100000
4145 && adjusted_mode->clock < 140500) {
4146 clock->p1 = 2;
4147 clock->p2 = 10;
4148 clock->n = 3;
4149 clock->m1 = 16;
4150 clock->m2 = 8;
4151 } else if (adjusted_mode->clock >= 140500
4152 && adjusted_mode->clock <= 200000) {
4153 clock->p1 = 1;
4154 clock->p2 = 10;
4155 clock->n = 6;
4156 clock->m1 = 12;
4157 clock->m2 = 8;
4158 }
4159}
4160
a7516a05
JB
4161static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4162 intel_clock_t *clock,
4163 intel_clock_t *reduced_clock)
4164{
4165 struct drm_device *dev = crtc->dev;
4166 struct drm_i915_private *dev_priv = dev->dev_private;
4167 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4168 int pipe = intel_crtc->pipe;
4169 u32 fp, fp2 = 0;
4170
4171 if (IS_PINEVIEW(dev)) {
4172 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
4173 if (reduced_clock)
4174 fp2 = (1 << reduced_clock->n) << 16 |
4175 reduced_clock->m1 << 8 | reduced_clock->m2;
4176 } else {
4177 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
4178 if (reduced_clock)
4179 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
4180 reduced_clock->m2;
4181 }
4182
4183 I915_WRITE(FP0(pipe), fp);
4184
4185 intel_crtc->lowfreq_avail = false;
4186 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4187 reduced_clock && i915_powersave) {
4188 I915_WRITE(FP1(pipe), fp2);
4189 intel_crtc->lowfreq_avail = true;
4190 } else {
4191 I915_WRITE(FP1(pipe), fp);
4192 }
4193}
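/* Example packing with hypothetical non-Pineview divisors: n = 6, m1 = 12,
 * m2 = 8 yields fp = 6 << 16 | 12 << 8 | 8 = 0x00060c08, which is written
 * to FP0 (and to FP1 as well unless an LVDS downclock is in use). */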
4194
a0c4da24
JB
4195static void vlv_update_pll(struct drm_crtc *crtc,
4196 struct drm_display_mode *mode,
4197 struct drm_display_mode *adjusted_mode,
4198 intel_clock_t *clock, intel_clock_t *reduced_clock,
2a8f64ca 4199 int num_connectors)
a0c4da24
JB
4200{
4201 struct drm_device *dev = crtc->dev;
4202 struct drm_i915_private *dev_priv = dev->dev_private;
4203 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4204 int pipe = intel_crtc->pipe;
4205 u32 dpll, mdiv, pdiv;
4206 u32 bestn, bestm1, bestm2, bestp1, bestp2;
2a8f64ca
VP
4207 bool is_sdvo;
4208 u32 temp;
a0c4da24 4209
09153000
DV
4210 mutex_lock(&dev_priv->dpio_lock);
4211
2a8f64ca
VP
4212 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4213 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
a0c4da24 4214
2a8f64ca
VP
4215 dpll = DPLL_VGA_MODE_DIS;
4216 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4217 dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4218 dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4219
4220 I915_WRITE(DPLL(pipe), dpll);
4221 POSTING_READ(DPLL(pipe));
a0c4da24
JB
4222
4223 bestn = clock->n;
4224 bestm1 = clock->m1;
4225 bestm2 = clock->m2;
4226 bestp1 = clock->p1;
4227 bestp2 = clock->p2;
4228
2a8f64ca
VP
4229 /*
4230	 * On Valleyview, the PLL and lane counter programming registers are
4231	 * exposed through the DPIO interface
4232 */
a0c4da24
JB
4233 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4234 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4235 mdiv |= ((bestn << DPIO_N_SHIFT));
4236 mdiv |= (1 << DPIO_POST_DIV_SHIFT);
4237 mdiv |= (1 << DPIO_K_SHIFT);
4238 mdiv |= DPIO_ENABLE_CALIBRATION;
4239 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4240
4241 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4242
2a8f64ca 4243 pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
a0c4da24 4244 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
2a8f64ca
VP
4245 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
4246 (5 << DPIO_CLK_BIAS_CTL_SHIFT);
a0c4da24
JB
4247 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4248
2a8f64ca 4249 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
a0c4da24
JB
4250
4251 dpll |= DPLL_VCO_ENABLE;
4252 I915_WRITE(DPLL(pipe), dpll);
4253 POSTING_READ(DPLL(pipe));
4254 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4255 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4256
2a8f64ca
VP
4257 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
4258
4259 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4260 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4261
4262 I915_WRITE(DPLL(pipe), dpll);
4263
4264 /* Wait for the clocks to stabilize. */
4265 POSTING_READ(DPLL(pipe));
4266 udelay(150);
a0c4da24 4267
2a8f64ca
VP
4268 temp = 0;
4269 if (is_sdvo) {
4270 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
a0c4da24
JB
4271 if (temp > 1)
4272 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4273 else
4274 temp = 0;
a0c4da24 4275 }
2a8f64ca
VP
4276 I915_WRITE(DPLL_MD(pipe), temp);
4277 POSTING_READ(DPLL_MD(pipe));
a0c4da24 4278
2a8f64ca
VP
4279 /* Now program lane control registers */
4280 if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
4281 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4282 {
4283 temp = 0x1000C4;
4284 if(pipe == 1)
4285 temp |= (1 << 21);
4286 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
4287 }
4288 if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
4289 {
4290 temp = 0x1000C4;
4291 if(pipe == 1)
4292 temp |= (1 << 21);
4293 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
4294 }
09153000
DV
4295
4296 mutex_unlock(&dev_priv->dpio_lock);
a0c4da24
JB
4297}
4298
eb1cbe48
DV
4299static void i9xx_update_pll(struct drm_crtc *crtc,
4300 struct drm_display_mode *mode,
4301 struct drm_display_mode *adjusted_mode,
4302 intel_clock_t *clock, intel_clock_t *reduced_clock,
4303 int num_connectors)
4304{
4305 struct drm_device *dev = crtc->dev;
4306 struct drm_i915_private *dev_priv = dev->dev_private;
4307 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dafd226c 4308 struct intel_encoder *encoder;
eb1cbe48
DV
4309 int pipe = intel_crtc->pipe;
4310 u32 dpll;
4311 bool is_sdvo;
4312
2a8f64ca
VP
4313 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4314
eb1cbe48
DV
4315 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4316 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4317
4318 dpll = DPLL_VGA_MODE_DIS;
4319
4320 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4321 dpll |= DPLLB_MODE_LVDS;
4322 else
4323 dpll |= DPLLB_MODE_DAC_SERIAL;
4324 if (is_sdvo) {
4325 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4326 if (pixel_multiplier > 1) {
4327 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4328 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4329 }
4330 dpll |= DPLL_DVO_HIGH_SPEED;
4331 }
4332 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4333 dpll |= DPLL_DVO_HIGH_SPEED;
4334
4335 /* compute bitmask from p1 value */
4336 if (IS_PINEVIEW(dev))
4337 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4338 else {
4339 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4340 if (IS_G4X(dev) && reduced_clock)
4341 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4342 }
4343 switch (clock->p2) {
4344 case 5:
4345 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4346 break;
4347 case 7:
4348 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4349 break;
4350 case 10:
4351 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4352 break;
4353 case 14:
4354 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4355 break;
4356 }
4357 if (INTEL_INFO(dev)->gen >= 4)
4358 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4359
4360 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4361 dpll |= PLL_REF_INPUT_TVCLKINBC;
4362 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4363 /* XXX: just matching BIOS for now */
4364 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4365 dpll |= 3;
4366 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4367 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4368 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4369 else
4370 dpll |= PLL_REF_INPUT_DREFCLK;
4371
4372 dpll |= DPLL_VCO_ENABLE;
4373 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4374 POSTING_READ(DPLL(pipe));
4375 udelay(150);
4376
dafd226c
DV
4377 for_each_encoder_on_crtc(dev, crtc, encoder)
4378 if (encoder->pre_pll_enable)
4379 encoder->pre_pll_enable(encoder);
4380
eb1cbe48
DV
4381 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4382 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4383
4384 I915_WRITE(DPLL(pipe), dpll);
4385
4386 /* Wait for the clocks to stabilize. */
4387 POSTING_READ(DPLL(pipe));
4388 udelay(150);
4389
4390 if (INTEL_INFO(dev)->gen >= 4) {
4391 u32 temp = 0;
4392 if (is_sdvo) {
4393 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4394 if (temp > 1)
4395 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4396 else
4397 temp = 0;
4398 }
4399 I915_WRITE(DPLL_MD(pipe), temp);
4400 } else {
4401 /* The pixel multiplier can only be updated once the
4402 * DPLL is enabled and the clocks are stable.
4403 *
4404 * So write it again.
4405 */
4406 I915_WRITE(DPLL(pipe), dpll);
4407 }
4408}
4409
4410static void i8xx_update_pll(struct drm_crtc *crtc,
4411 struct drm_display_mode *adjusted_mode,
2a8f64ca 4412 intel_clock_t *clock, intel_clock_t *reduced_clock,
eb1cbe48
DV
4413 int num_connectors)
4414{
4415 struct drm_device *dev = crtc->dev;
4416 struct drm_i915_private *dev_priv = dev->dev_private;
4417 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dafd226c 4418 struct intel_encoder *encoder;
eb1cbe48
DV
4419 int pipe = intel_crtc->pipe;
4420 u32 dpll;
4421
2a8f64ca
VP
4422 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4423
eb1cbe48
DV
4424 dpll = DPLL_VGA_MODE_DIS;
4425
4426 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4427 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4428 } else {
4429 if (clock->p1 == 2)
4430 dpll |= PLL_P1_DIVIDE_BY_TWO;
4431 else
4432 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4433 if (clock->p2 == 4)
4434 dpll |= PLL_P2_DIVIDE_BY_4;
4435 }
4436
4437 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4438 /* XXX: just matching BIOS for now */
4439 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4440 dpll |= 3;
4441 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4442 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4443 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4444 else
4445 dpll |= PLL_REF_INPUT_DREFCLK;
4446
4447 dpll |= DPLL_VCO_ENABLE;
4448 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4449 POSTING_READ(DPLL(pipe));
4450 udelay(150);
4451
dafd226c
DV
4452 for_each_encoder_on_crtc(dev, crtc, encoder)
4453 if (encoder->pre_pll_enable)
4454 encoder->pre_pll_enable(encoder);
4455
5b5896e4
DV
4456 I915_WRITE(DPLL(pipe), dpll);
4457
4458 /* Wait for the clocks to stabilize. */
4459 POSTING_READ(DPLL(pipe));
4460 udelay(150);
4461
eb1cbe48
DV
4462 /* The pixel multiplier can only be updated once the
4463 * DPLL is enabled and the clocks are stable.
4464 *
4465 * So write it again.
4466 */
4467 I915_WRITE(DPLL(pipe), dpll);
4468}
4469
b0e77b9c
PZ
4470static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4471 struct drm_display_mode *mode,
4472 struct drm_display_mode *adjusted_mode)
4473{
4474 struct drm_device *dev = intel_crtc->base.dev;
4475 struct drm_i915_private *dev_priv = dev->dev_private;
4476 enum pipe pipe = intel_crtc->pipe;
fe2b8f9d 4477 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
b0e77b9c
PZ
4478 uint32_t vsyncshift;
4479
4480 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4481 /* the chip adds 2 halflines automatically */
4482 adjusted_mode->crtc_vtotal -= 1;
4483 adjusted_mode->crtc_vblank_end -= 1;
4484 vsyncshift = adjusted_mode->crtc_hsync_start
4485 - adjusted_mode->crtc_htotal / 2;
4486 } else {
4487 vsyncshift = 0;
4488 }
4489
4490 if (INTEL_INFO(dev)->gen > 3)
fe2b8f9d 4491 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 4492
fe2b8f9d 4493 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
4494 (adjusted_mode->crtc_hdisplay - 1) |
4495 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 4496 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
4497 (adjusted_mode->crtc_hblank_start - 1) |
4498 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 4499 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
4500 (adjusted_mode->crtc_hsync_start - 1) |
4501 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4502
fe2b8f9d 4503 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c
PZ
4504 (adjusted_mode->crtc_vdisplay - 1) |
4505 ((adjusted_mode->crtc_vtotal - 1) << 16));
fe2b8f9d 4506 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c
PZ
4507 (adjusted_mode->crtc_vblank_start - 1) |
4508 ((adjusted_mode->crtc_vblank_end - 1) << 16));
fe2b8f9d 4509 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
4510 (adjusted_mode->crtc_vsync_start - 1) |
4511 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4512
b5e508d4
PZ
4513 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4514 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4515 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4516 * bits. */
4517 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4518 (pipe == PIPE_B || pipe == PIPE_C))
4519 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4520
b0e77b9c
PZ
4521 /* pipesrc controls the size that is scaled from, which should
4522 * always be the user's requested size.
4523 */
4524 I915_WRITE(PIPESRC(pipe),
4525 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4526}
4527
f564048e
EA
4528static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4529 struct drm_display_mode *mode,
4530 struct drm_display_mode *adjusted_mode,
4531 int x, int y,
94352cf9 4532 struct drm_framebuffer *fb)
79e53945
JB
4533{
4534 struct drm_device *dev = crtc->dev;
4535 struct drm_i915_private *dev_priv = dev->dev_private;
4536 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4537 int pipe = intel_crtc->pipe;
80824003 4538 int plane = intel_crtc->plane;
c751ce4f 4539 int refclk, num_connectors = 0;
652c393a 4540 intel_clock_t clock, reduced_clock;
b0e77b9c 4541 u32 dspcntr, pipeconf;
eb1cbe48
DV
4542 bool ok, has_reduced_clock = false, is_sdvo = false;
4543 bool is_lvds = false, is_tv = false, is_dp = false;
5eddb70b 4544 struct intel_encoder *encoder;
d4906093 4545 const intel_limit_t *limit;
5c3b82e2 4546 int ret;
79e53945 4547
6c2b7c12 4548 for_each_encoder_on_crtc(dev, crtc, encoder) {
5eddb70b 4549 switch (encoder->type) {
79e53945
JB
4550 case INTEL_OUTPUT_LVDS:
4551 is_lvds = true;
4552 break;
4553 case INTEL_OUTPUT_SDVO:
7d57382e 4554 case INTEL_OUTPUT_HDMI:
79e53945 4555 is_sdvo = true;
5eddb70b 4556 if (encoder->needs_tv_clock)
e2f0ba97 4557 is_tv = true;
79e53945 4558 break;
79e53945
JB
4559 case INTEL_OUTPUT_TVOUT:
4560 is_tv = true;
4561 break;
a4fc5ed6
KP
4562 case INTEL_OUTPUT_DISPLAYPORT:
4563 is_dp = true;
4564 break;
79e53945 4565 }
43565a06 4566
c751ce4f 4567 num_connectors++;
79e53945
JB
4568 }
4569
c65d77d8 4570 refclk = i9xx_get_refclk(crtc, num_connectors);
79e53945 4571
d4906093
ML
4572 /*
4573 * Returns a set of divisors for the desired target clock with the given
4574 * refclk, or FALSE. The returned values represent the clock equation:
4575 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4576 */
1b894b59 4577 limit = intel_limit(crtc, refclk);
cec2f356
SP
4578 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4579 &clock);
79e53945
JB
4580 if (!ok) {
4581 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 4582 return -EINVAL;
79e53945
JB
4583 }
4584
cda4b7d3 4585 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 4586 intel_crtc_update_cursor(crtc, true);
cda4b7d3 4587
ddc9003c 4588 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4589 /*
4590 * Ensure we match the reduced clock's P to the target clock.
4591 * If the clocks don't match, we can't switch the display clock
4592 * by using the FP0/FP1. In such case we will disable the LVDS
4593 * downclock feature.
4594 */
ddc9003c 4595 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
4596 dev_priv->lvds_downclock,
4597 refclk,
cec2f356 4598 &clock,
5eddb70b 4599 &reduced_clock);
7026d4ac
ZW
4600 }
4601
c65d77d8
JB
4602 if (is_sdvo && is_tv)
4603 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
7026d4ac 4604
eb1cbe48 4605 if (IS_GEN2(dev))
2a8f64ca
VP
4606 i8xx_update_pll(crtc, adjusted_mode, &clock,
4607 has_reduced_clock ? &reduced_clock : NULL,
4608 num_connectors);
a0c4da24 4609 else if (IS_VALLEYVIEW(dev))
2a8f64ca
VP
4610 vlv_update_pll(crtc, mode, adjusted_mode, &clock,
4611 has_reduced_clock ? &reduced_clock : NULL,
4612 num_connectors);
79e53945 4613 else
eb1cbe48
DV
4614 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4615 has_reduced_clock ? &reduced_clock : NULL,
4616 num_connectors);
79e53945
JB
4617
4618 /* setup pipeconf */
5eddb70b 4619 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
4620
4621 /* Set up the display plane register */
4622 dspcntr = DISPPLANE_GAMMA_ENABLE;
4623
929c77fb
EA
4624 if (pipe == 0)
4625 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4626 else
4627 dspcntr |= DISPPLANE_SEL_PIPE_B;
79e53945 4628
a6c45cf0 4629 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
79e53945
JB
4630 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4631 * core speed.
4632 *
4633 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4634 * pipe == 0 check?
4635 */
e70236a8
JB
4636 if (mode->clock >
4637 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5eddb70b 4638 pipeconf |= PIPECONF_DOUBLE_WIDE;
79e53945 4639 else
5eddb70b 4640 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
79e53945
JB
4641 }
4642
3b5c78a3
AJ
4643 /* default to 8bpc */
4644 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4645 if (is_dp) {
0c96c65b 4646 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3b5c78a3
AJ
4647 pipeconf |= PIPECONF_BPP_6 |
4648 PIPECONF_DITHER_EN |
4649 PIPECONF_DITHER_TYPE_SP;
4650 }
4651 }
4652
19c03924
GB
4653 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4654 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4655 pipeconf |= PIPECONF_BPP_6 |
4656 PIPECONF_ENABLE |
4657 I965_PIPECONF_ACTIVE;
4658 }
4659 }
4660
28c97730 4661 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
79e53945
JB
4662 drm_mode_debug_printmodeline(mode);
4663
a7516a05
JB
4664 if (HAS_PIPE_CXSR(dev)) {
4665 if (intel_crtc->lowfreq_avail) {
28c97730 4666 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
652c393a 4667 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
a7516a05 4668 } else {
28c97730 4669 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
652c393a
JB
4670 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4671 }
4672 }
4673
617cf884 4674 pipeconf &= ~PIPECONF_INTERLACE_MASK;
dbb02575 4675 if (!IS_GEN2(dev) &&
b0e77b9c 4676 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
734b4157 4677 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
b0e77b9c 4678 else
617cf884 4679 pipeconf |= PIPECONF_PROGRESSIVE;
734b4157 4680
b0e77b9c 4681 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5eddb70b
CW
4682
4683 /* pipesrc and dspsize control the size that is scaled from,
4684 * which should always be the user's requested size.
79e53945 4685 */
929c77fb
EA
4686 I915_WRITE(DSPSIZE(plane),
4687 ((mode->vdisplay - 1) << 16) |
4688 (mode->hdisplay - 1));
4689 I915_WRITE(DSPPOS(plane), 0);
2c07245f 4690
f564048e
EA
4691 I915_WRITE(PIPECONF(pipe), pipeconf);
4692 POSTING_READ(PIPECONF(pipe));
929c77fb 4693 intel_enable_pipe(dev_priv, pipe, false);
f564048e
EA
4694
4695 intel_wait_for_vblank(dev, pipe);
4696
f564048e
EA
4697 I915_WRITE(DSPCNTR(plane), dspcntr);
4698 POSTING_READ(DSPCNTR(plane));
4699
94352cf9 4700 ret = intel_pipe_set_base(crtc, x, y, fb);
f564048e
EA
4701
4702 intel_update_watermarks(dev);
4703
f564048e
EA
4704 return ret;
4705}
4706
9fb526db
KP
4707/*
4708 * Initialize reference clocks when the driver loads
4709 */
4710void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
4711{
4712 struct drm_i915_private *dev_priv = dev->dev_private;
4713 struct drm_mode_config *mode_config = &dev->mode_config;
13d83a67 4714 struct intel_encoder *encoder;
13d83a67
JB
4715 u32 temp;
4716 bool has_lvds = false;
199e5d79
KP
4717 bool has_cpu_edp = false;
4718 bool has_pch_edp = false;
4719 bool has_panel = false;
99eb6a01
KP
4720 bool has_ck505 = false;
4721 bool can_ssc = false;
13d83a67
JB
4722
4723 /* We need to take the global config into account */
199e5d79
KP
4724 list_for_each_entry(encoder, &mode_config->encoder_list,
4725 base.head) {
4726 switch (encoder->type) {
4727 case INTEL_OUTPUT_LVDS:
4728 has_panel = true;
4729 has_lvds = true;
4730 break;
4731 case INTEL_OUTPUT_EDP:
4732 has_panel = true;
4733 if (intel_encoder_is_pch_edp(&encoder->base))
4734 has_pch_edp = true;
4735 else
4736 has_cpu_edp = true;
4737 break;
13d83a67
JB
4738 }
4739 }
4740
99eb6a01
KP
4741 if (HAS_PCH_IBX(dev)) {
4742 has_ck505 = dev_priv->display_clock_mode;
4743 can_ssc = has_ck505;
4744 } else {
4745 has_ck505 = false;
4746 can_ssc = true;
4747 }
4748
4749 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4750 has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4751 has_ck505);
13d83a67
JB
4752
4753	/* Ironlake: try to set up the display reference clock before
4754	 * DPLL enabling. This is only under the driver's control after
4755	 * the PCH B stepping; previous chipset steppings should ignore
4756	 * this setting.
4757 */
4758 temp = I915_READ(PCH_DREF_CONTROL);
4759 /* Always enable nonspread source */
4760 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 4761
99eb6a01
KP
4762 if (has_ck505)
4763 temp |= DREF_NONSPREAD_CK505_ENABLE;
4764 else
4765 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 4766
199e5d79
KP
4767 if (has_panel) {
4768 temp &= ~DREF_SSC_SOURCE_MASK;
4769 temp |= DREF_SSC_SOURCE_ENABLE;
13d83a67 4770
199e5d79 4771 /* SSC must be turned on before enabling the CPU output */
99eb6a01 4772 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4773 DRM_DEBUG_KMS("Using SSC on panel\n");
13d83a67 4774 temp |= DREF_SSC1_ENABLE;
e77166b5
DV
4775 } else
4776 temp &= ~DREF_SSC1_ENABLE;
199e5d79
KP
4777
4778 /* Get SSC going before enabling the outputs */
4779 I915_WRITE(PCH_DREF_CONTROL, temp);
4780 POSTING_READ(PCH_DREF_CONTROL);
4781 udelay(200);
4782
13d83a67
JB
4783 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4784
4785 /* Enable CPU source on CPU attached eDP */
199e5d79 4786 if (has_cpu_edp) {
99eb6a01 4787 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4788 DRM_DEBUG_KMS("Using SSC on eDP\n");
13d83a67 4789 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
199e5d79 4790 }
13d83a67
JB
4791 else
4792 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79
KP
4793 } else
4794 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4795
4796 I915_WRITE(PCH_DREF_CONTROL, temp);
4797 POSTING_READ(PCH_DREF_CONTROL);
4798 udelay(200);
4799 } else {
4800 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4801
4802 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4803
4804 /* Turn off CPU output */
4805 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4806
4807 I915_WRITE(PCH_DREF_CONTROL, temp);
4808 POSTING_READ(PCH_DREF_CONTROL);
4809 udelay(200);
4810
4811 /* Turn off the SSC source */
4812 temp &= ~DREF_SSC_SOURCE_MASK;
4813 temp |= DREF_SSC_SOURCE_DISABLE;
4814
4815 /* Turn off SSC1 */
4816 temp &= ~ DREF_SSC1_ENABLE;
4817
13d83a67
JB
4818 I915_WRITE(PCH_DREF_CONTROL, temp);
4819 POSTING_READ(PCH_DREF_CONTROL);
4820 udelay(200);
4821 }
4822}
4823
d9d444cb
JB
4824static int ironlake_get_refclk(struct drm_crtc *crtc)
4825{
4826 struct drm_device *dev = crtc->dev;
4827 struct drm_i915_private *dev_priv = dev->dev_private;
4828 struct intel_encoder *encoder;
d9d444cb
JB
4829 struct intel_encoder *edp_encoder = NULL;
4830 int num_connectors = 0;
4831 bool is_lvds = false;
4832
6c2b7c12 4833 for_each_encoder_on_crtc(dev, crtc, encoder) {
d9d444cb
JB
4834 switch (encoder->type) {
4835 case INTEL_OUTPUT_LVDS:
4836 is_lvds = true;
4837 break;
4838 case INTEL_OUTPUT_EDP:
4839 edp_encoder = encoder;
4840 break;
4841 }
4842 num_connectors++;
4843 }
4844
4845 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4846 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4847 dev_priv->lvds_ssc_freq);
4848 return dev_priv->lvds_ssc_freq * 1000;
4849 }
4850
4851 return 120000;
4852}
4853
c8203565 4854static void ironlake_set_pipeconf(struct drm_crtc *crtc,
f564048e 4855 struct drm_display_mode *adjusted_mode,
c8203565 4856 bool dither)
79e53945 4857{
c8203565 4858 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
79e53945
JB
4859 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4860 int pipe = intel_crtc->pipe;
c8203565
PZ
4861 uint32_t val;
4862
4863 val = I915_READ(PIPECONF(pipe));
4864
4865 val &= ~PIPE_BPC_MASK;
4866 switch (intel_crtc->bpp) {
4867 case 18:
4868 val |= PIPE_6BPC;
4869 break;
4870 case 24:
4871 val |= PIPE_8BPC;
4872 break;
4873 case 30:
4874 val |= PIPE_10BPC;
4875 break;
4876 case 36:
4877 val |= PIPE_12BPC;
4878 break;
4879 default:
cc769b62
PZ
4880 /* Case prevented by intel_choose_pipe_bpp_dither. */
4881 BUG();
c8203565
PZ
4882 }
4883
4884 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4885 if (dither)
4886 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
4887
4888 val &= ~PIPECONF_INTERLACE_MASK;
4889 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4890 val |= PIPECONF_INTERLACED_ILK;
4891 else
4892 val |= PIPECONF_PROGRESSIVE;
4893
4894 I915_WRITE(PIPECONF(pipe), val);
4895 POSTING_READ(PIPECONF(pipe));
4896}
4897
ee2b0b38
PZ
4898static void haswell_set_pipeconf(struct drm_crtc *crtc,
4899 struct drm_display_mode *adjusted_mode,
4900 bool dither)
4901{
4902 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4903 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
702e7a56 4904 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
ee2b0b38
PZ
4905 uint32_t val;
4906
702e7a56 4907 val = I915_READ(PIPECONF(cpu_transcoder));
ee2b0b38
PZ
4908
4909 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4910 if (dither)
4911 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
4912
4913 val &= ~PIPECONF_INTERLACE_MASK_HSW;
4914 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4915 val |= PIPECONF_INTERLACED_ILK;
4916 else
4917 val |= PIPECONF_PROGRESSIVE;
4918
702e7a56
PZ
4919 I915_WRITE(PIPECONF(cpu_transcoder), val);
4920 POSTING_READ(PIPECONF(cpu_transcoder));
ee2b0b38
PZ
4921}
4922
6591c6e4
PZ
4923static bool ironlake_compute_clocks(struct drm_crtc *crtc,
4924 struct drm_display_mode *adjusted_mode,
4925 intel_clock_t *clock,
4926 bool *has_reduced_clock,
4927 intel_clock_t *reduced_clock)
4928{
4929 struct drm_device *dev = crtc->dev;
4930 struct drm_i915_private *dev_priv = dev->dev_private;
4931 struct intel_encoder *intel_encoder;
4932 int refclk;
d4906093 4933 const intel_limit_t *limit;
6591c6e4 4934 bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
79e53945 4935
6591c6e4
PZ
4936 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4937 switch (intel_encoder->type) {
79e53945
JB
4938 case INTEL_OUTPUT_LVDS:
4939 is_lvds = true;
4940 break;
4941 case INTEL_OUTPUT_SDVO:
7d57382e 4942 case INTEL_OUTPUT_HDMI:
79e53945 4943 is_sdvo = true;
6591c6e4 4944 if (intel_encoder->needs_tv_clock)
e2f0ba97 4945 is_tv = true;
79e53945 4946 break;
79e53945
JB
4947 case INTEL_OUTPUT_TVOUT:
4948 is_tv = true;
4949 break;
79e53945
JB
4950 }
4951 }
4952
d9d444cb 4953 refclk = ironlake_get_refclk(crtc);
79e53945 4954
d4906093
ML
4955 /*
4956 * Returns a set of divisors for the desired target clock with the given
4957 * refclk, or FALSE. The returned values represent the clock equation:
4958 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4959 */
1b894b59 4960 limit = intel_limit(crtc, refclk);
6591c6e4
PZ
4961 ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4962 clock);
4963 if (!ret)
4964 return false;
cda4b7d3 4965
ddc9003c 4966 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4967 /*
4968 * Ensure we match the reduced clock's P to the target clock.
4969 * If the clocks don't match, we can't switch the display clock
4970 * by using the FP0/FP1. In such case we will disable the LVDS
4971 * downclock feature.
4972 */
6591c6e4
PZ
4973 *has_reduced_clock = limit->find_pll(limit, crtc,
4974 dev_priv->lvds_downclock,
4975 refclk,
4976 clock,
4977 reduced_clock);
652c393a 4978 }
61e9653f
DV
4979
4980 if (is_sdvo && is_tv)
6591c6e4
PZ
4981 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
4982
4983 return true;
4984}
4985
01a415fd
DV
4986static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
4987{
4988 struct drm_i915_private *dev_priv = dev->dev_private;
4989 uint32_t temp;
4990
4991 temp = I915_READ(SOUTH_CHICKEN1);
4992 if (temp & FDI_BC_BIFURCATION_SELECT)
4993 return;
4994
4995 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4996 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4997
4998 temp |= FDI_BC_BIFURCATION_SELECT;
4999 DRM_DEBUG_KMS("enabling fdi C rx\n");
5000 I915_WRITE(SOUTH_CHICKEN1, temp);
5001 POSTING_READ(SOUTH_CHICKEN1);
5002}
5003
5004static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5005{
5006 struct drm_device *dev = intel_crtc->base.dev;
5007 struct drm_i915_private *dev_priv = dev->dev_private;
5008 struct intel_crtc *pipe_B_crtc =
5009 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5010
5011 DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
5012 intel_crtc->pipe, intel_crtc->fdi_lanes);
5013 if (intel_crtc->fdi_lanes > 4) {
5014 DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
5015 intel_crtc->pipe, intel_crtc->fdi_lanes);
5016 /* Clamp lanes to avoid programming the hw with bogus values. */
5017 intel_crtc->fdi_lanes = 4;
5018
5019 return false;
5020 }
5021
5022 if (dev_priv->num_pipe == 2)
5023 return true;
5024
5025 switch (intel_crtc->pipe) {
5026 case PIPE_A:
5027 return true;
5028 case PIPE_B:
5029 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5030 intel_crtc->fdi_lanes > 2) {
5031 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5032 intel_crtc->pipe, intel_crtc->fdi_lanes);
5033 /* Clamp lanes to avoid programming the hw with bogus values. */
5034 intel_crtc->fdi_lanes = 2;
5035
5036 return false;
5037 }
5038
5039 if (intel_crtc->fdi_lanes > 2)
5040 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5041 else
5042 cpt_enable_fdi_bc_bifurcation(dev);
5043
5044 return true;
5045 case PIPE_C:
5046 if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
5047 if (intel_crtc->fdi_lanes > 2) {
5048 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5049 intel_crtc->pipe, intel_crtc->fdi_lanes);
5050 /* Clamp lanes to avoid programming the hw with bogus values. */
5051 intel_crtc->fdi_lanes = 2;
5052
5053 return false;
5054 }
5055 } else {
5056 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5057 return false;
5058 }
5059
5060 cpt_enable_fdi_bc_bifurcation(dev);
5061
5062 return true;
5063 default:
5064 BUG();
5065 }
5066}
5067
f48d8f23
PZ
5068static void ironlake_set_m_n(struct drm_crtc *crtc,
5069 struct drm_display_mode *mode,
5070 struct drm_display_mode *adjusted_mode)
79e53945
JB
5071{
5072 struct drm_device *dev = crtc->dev;
5073 struct drm_i915_private *dev_priv = dev->dev_private;
5074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
afe2fcf5 5075 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
f48d8f23 5076 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
e69d0bc1 5077 struct intel_link_m_n m_n = {0};
f48d8f23
PZ
5078 int target_clock, pixel_multiplier, lane, link_bw;
5079 bool is_dp = false, is_cpu_edp = false;
79e53945 5080
f48d8f23
PZ
5081 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5082 switch (intel_encoder->type) {
a4fc5ed6
KP
5083 case INTEL_OUTPUT_DISPLAYPORT:
5084 is_dp = true;
5085 break;
32f9d658 5086 case INTEL_OUTPUT_EDP:
e3aef172 5087 is_dp = true;
f48d8f23 5088 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
e3aef172 5089 is_cpu_edp = true;
f48d8f23 5090 edp_encoder = intel_encoder;
32f9d658 5091 break;
79e53945 5092 }
79e53945 5093 }
61e9653f 5094
2c07245f 5095 /* FDI link */
8febb297
EA
5096 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5097 lane = 0;
5098 /* CPU eDP doesn't require FDI link, so just set DP M/N
5099 according to current link config */
e3aef172 5100 if (is_cpu_edp) {
e3aef172 5101 intel_edp_link_config(edp_encoder, &lane, &link_bw);
8febb297 5102 } else {
8febb297
EA
5103 /* FDI is a binary signal running at ~2.7GHz, encoding
5104 * each output octet as 10 bits. The actual frequency
5105 * is stored as a divider into a 100MHz clock, and the
5106 * mode pixel clock is stored in units of 1KHz.
5107 * Hence the bw of each lane in terms of the mode signal
5108 * is:
5109 */
5110 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5111 }
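/*
 * Worked number (illustrative, assuming the ~2.7 GHz FDI link described
 * in the comment above): each 10-bit symbol carries one payload octet,
 * so a lane moves roughly 2.7e9 / 10 = 270e6 octets per second, i.e.
 * link_bw comes out around 270000 in the same 1 kHz-based units as the
 * mode clock. The exact figure depends on what intel_fdi_link_freq()
 * reports for the platform.
 */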
58a27471 5112
94bf2ced
DV
5113 /* [e]DP over FDI requires target mode clock instead of link clock. */
5114 if (edp_encoder)
5115 target_clock = intel_edp_target_clock(edp_encoder, mode);
5116 else if (is_dp)
5117 target_clock = mode->clock;
5118 else
5119 target_clock = adjusted_mode->clock;
5120
8febb297
EA
5121 if (!lane) {
5122 /*
5123 * Account for spread spectrum to avoid
5124 * oversubscribing the link. Max center spread
5125 * is 2.5%; use 5% for safety's sake.
5126 */
5a354204 5127 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
8febb297 5128 lane = bps / (link_bw * 8) + 1;
5eb08b69 5129 }
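/*
 * Worked example (illustrative, assuming link_bw of 270000 as sketched
 * above): a 148500 kHz mode at 24 bpp needs
 *   bps  = 148500 * 24 * 21 / 20      = 3742200
 *   lane = 3742200 / (270000 * 8) + 1 = 2 FDI lanes
 * The 21/20 factor is the 5% spread-spectrum headroom described above.
 */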
2c07245f 5130
8febb297
EA
5131 intel_crtc->fdi_lanes = lane;
5132
5133 if (pixel_multiplier > 1)
5134 link_bw *= pixel_multiplier;
e69d0bc1 5135 intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
8febb297 5136
afe2fcf5
PZ
5137 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
5138 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5139 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
5140 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
f48d8f23
PZ
5141}
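/*
 * Illustrative sketch only (not from the original file): assuming the
 * usual DisplayPort-style definition of the link timing ratios, the
 * M/N values programmed above boil down to something like the helper
 * below. The real intel_link_compute_m_n() also reduces the ratios to
 * fit the registers and picks the TU size; the helper name is a
 * placeholder.
 */
#if 0	/* example, deliberately not built */
static void example_compute_m_n(int bpp, int nlanes, int pixel_clock,
				int link_clock, struct intel_link_m_n *m_n)
{
	/* data M/N: pixel payload bits vs. raw link capacity */
	m_n->gmch_m = bpp * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	/* link M/N: pixel clock vs. link symbol clock */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
}
#endif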
5142
de13a2e3
PZ
5143static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5144 struct drm_display_mode *adjusted_mode,
5145 intel_clock_t *clock, u32 fp)
79e53945 5146{
de13a2e3 5147 struct drm_crtc *crtc = &intel_crtc->base;
79e53945
JB
5148 struct drm_device *dev = crtc->dev;
5149 struct drm_i915_private *dev_priv = dev->dev_private;
de13a2e3
PZ
5150 struct intel_encoder *intel_encoder;
5151 uint32_t dpll;
5152 int factor, pixel_multiplier, num_connectors = 0;
5153 bool is_lvds = false, is_sdvo = false, is_tv = false;
5154 bool is_dp = false, is_cpu_edp = false;
79e53945 5155
de13a2e3
PZ
5156 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5157 switch (intel_encoder->type) {
79e53945
JB
5158 case INTEL_OUTPUT_LVDS:
5159 is_lvds = true;
5160 break;
5161 case INTEL_OUTPUT_SDVO:
7d57382e 5162 case INTEL_OUTPUT_HDMI:
79e53945 5163 is_sdvo = true;
de13a2e3 5164 if (intel_encoder->needs_tv_clock)
e2f0ba97 5165 is_tv = true;
79e53945 5166 break;
79e53945
JB
5167 case INTEL_OUTPUT_TVOUT:
5168 is_tv = true;
5169 break;
a4fc5ed6
KP
5170 case INTEL_OUTPUT_DISPLAYPORT:
5171 is_dp = true;
5172 break;
32f9d658 5173 case INTEL_OUTPUT_EDP:
e3aef172 5174 is_dp = true;
de13a2e3 5175 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
e3aef172 5176 is_cpu_edp = true;
32f9d658 5177 break;
79e53945 5178 }
43565a06 5179
c751ce4f 5180 num_connectors++;
79e53945 5181 }
79e53945 5182
c1858123 5183 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
5184 factor = 21;
5185 if (is_lvds) {
5186 if ((intel_panel_use_ssc(dev_priv) &&
5187 dev_priv->lvds_ssc_freq == 100) ||
1974cad0 5188 intel_is_dual_link_lvds(dev))
8febb297
EA
5189 factor = 25;
5190 } else if (is_sdvo && is_tv)
5191 factor = 20;
c1858123 5192
de13a2e3 5193 if (clock->m < factor * clock->n)
8febb297 5194 fp |= FP_CB_TUNE;
2c07245f 5195
5eddb70b 5196 dpll = 0;
2c07245f 5197
a07d6787
EA
5198 if (is_lvds)
5199 dpll |= DPLLB_MODE_LVDS;
5200 else
5201 dpll |= DPLLB_MODE_DAC_SERIAL;
5202 if (is_sdvo) {
de13a2e3 5203 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
a07d6787
EA
5204 if (pixel_multiplier > 1) {
5205 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
79e53945 5206 }
a07d6787
EA
5207 dpll |= DPLL_DVO_HIGH_SPEED;
5208 }
e3aef172 5209 if (is_dp && !is_cpu_edp)
a07d6787 5210 dpll |= DPLL_DVO_HIGH_SPEED;
79e53945 5211
a07d6787 5212 /* compute bitmask from p1 value */
de13a2e3 5213 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 5214 /* also FPA1 */
de13a2e3 5215 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
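/* Illustrative: p1 is programmed one-hot, so e.g. p1 == 2 sets bit 1
 * (value 0x2) in both P1 post-divider fields above. */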
a07d6787 5216
de13a2e3 5217 switch (clock->p2) {
a07d6787
EA
5218 case 5:
5219 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5220 break;
5221 case 7:
5222 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5223 break;
5224 case 10:
5225 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5226 break;
5227 case 14:
5228 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5229 break;
79e53945
JB
5230 }
5231
43565a06
KH
5232 if (is_sdvo && is_tv)
5233 dpll |= PLL_REF_INPUT_TVCLKINBC;
5234 else if (is_tv)
79e53945 5235 /* XXX: just matching BIOS for now */
43565a06 5236 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
79e53945 5237 dpll |= 3;
a7615030 5238 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 5239 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
5240 else
5241 dpll |= PLL_REF_INPUT_DREFCLK;
5242
de13a2e3
PZ
5243 return dpll;
5244}
5245
5246static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5247 struct drm_display_mode *mode,
5248 struct drm_display_mode *adjusted_mode,
5249 int x, int y,
5250 struct drm_framebuffer *fb)
5251{
5252 struct drm_device *dev = crtc->dev;
5253 struct drm_i915_private *dev_priv = dev->dev_private;
5254 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5255 int pipe = intel_crtc->pipe;
5256 int plane = intel_crtc->plane;
5257 int num_connectors = 0;
5258 intel_clock_t clock, reduced_clock;
5259 u32 dpll, fp = 0, fp2 = 0;
e2f12b07
PZ
5260 bool ok, has_reduced_clock = false;
5261 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
de13a2e3 5262 struct intel_encoder *encoder;
de13a2e3 5263 int ret;
01a415fd 5264 bool dither, fdi_config_ok;
de13a2e3
PZ
5265
5266 for_each_encoder_on_crtc(dev, crtc, encoder) {
5267 switch (encoder->type) {
5268 case INTEL_OUTPUT_LVDS:
5269 is_lvds = true;
5270 break;
de13a2e3
PZ
5271 case INTEL_OUTPUT_DISPLAYPORT:
5272 is_dp = true;
5273 break;
5274 case INTEL_OUTPUT_EDP:
5275 is_dp = true;
e2f12b07 5276 if (!intel_encoder_is_pch_edp(&encoder->base))
de13a2e3
PZ
5277 is_cpu_edp = true;
5278 break;
5279 }
5280
5281 num_connectors++;
a07d6787 5282 }
79e53945 5283
5dc5298b
PZ
5284 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5285 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
a07d6787 5286
de13a2e3
PZ
5287 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5288 &has_reduced_clock, &reduced_clock);
5289 if (!ok) {
5290 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5291 return -EINVAL;
79e53945
JB
5292 }
5293
de13a2e3
PZ
5294 /* Ensure that the cursor is valid for the new mode before changing... */
5295 intel_crtc_update_cursor(crtc, true);
5296
5297 /* determine panel color depth */
c8241969
JN
5298 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5299 adjusted_mode);
de13a2e3
PZ
5300 if (is_lvds && dev_priv->lvds_dither)
5301 dither = true;
5302
5303 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5304 if (has_reduced_clock)
5305 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5306 reduced_clock.m2;
5307
5308 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
79e53945 5309
f7cb34d4 5310 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
79e53945
JB
5311 drm_mode_debug_printmodeline(mode);
5312
5dc5298b
PZ
5313 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5314 if (!is_cpu_edp) {
ee7b9f93 5315 struct intel_pch_pll *pll;
4b645f14 5316
ee7b9f93
JB
5317 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5318 if (pll == NULL) {
5319 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5320 pipe);
4b645f14
JB
5321 return -EINVAL;
5322 }
ee7b9f93
JB
5323 } else
5324 intel_put_pch_pll(intel_crtc);
79e53945 5325
2f0c2ad1 5326 if (is_dp && !is_cpu_edp)
a4fc5ed6 5327 intel_dp_set_m_n(crtc, mode, adjusted_mode);
79e53945 5328
dafd226c
DV
5329 for_each_encoder_on_crtc(dev, crtc, encoder)
5330 if (encoder->pre_pll_enable)
5331 encoder->pre_pll_enable(encoder);
5332
ee7b9f93
JB
5333 if (intel_crtc->pch_pll) {
5334 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5eddb70b 5335
32f9d658 5336 /* Wait for the clocks to stabilize. */
ee7b9f93 5337 POSTING_READ(intel_crtc->pch_pll->pll_reg);
32f9d658
ZW
5338 udelay(150);
5339
8febb297
EA
5340 /* The pixel multiplier can only be updated once the
5341 * DPLL is enabled and the clocks are stable.
5342 *
5343 * So write it again.
5344 */
ee7b9f93 5345 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
79e53945 5346 }
79e53945 5347
5eddb70b 5348 intel_crtc->lowfreq_avail = false;
ee7b9f93 5349 if (intel_crtc->pch_pll) {
4b645f14 5350 if (is_lvds && has_reduced_clock && i915_powersave) {
ee7b9f93 5351 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4b645f14 5352 intel_crtc->lowfreq_avail = true;
4b645f14 5353 } else {
ee7b9f93 5354 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
652c393a
JB
5355 }
5356 }
5357
b0e77b9c 5358 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5eddb70b 5359
01a415fd
DV
5360 /* Note, this also computes intel_crtc->fdi_lanes which is used below in
5361 * ironlake_check_fdi_lanes. */
f48d8f23 5362 ironlake_set_m_n(crtc, mode, adjusted_mode);
2c07245f 5363
01a415fd 5364 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
2c07245f 5365
c8203565 5366 ironlake_set_pipeconf(crtc, adjusted_mode, dither);
79e53945 5367
9d0498a2 5368 intel_wait_for_vblank(dev, pipe);
79e53945 5369
a1f9e77e
PZ
5370 /* Set up the display plane register */
5371 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
b24e7179 5372 POSTING_READ(DSPCNTR(plane));
79e53945 5373
94352cf9 5374 ret = intel_pipe_set_base(crtc, x, y, fb);
7662c8bd
SL
5375
5376 intel_update_watermarks(dev);
5377
1f8eeabf
ED
5378 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5379
01a415fd 5380 return fdi_config_ok ? ret : -EINVAL;
79e53945
JB
5381}
5382
09b4ddf9
PZ
5383static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5384 struct drm_display_mode *mode,
5385 struct drm_display_mode *adjusted_mode,
5386 int x, int y,
5387 struct drm_framebuffer *fb)
5388{
5389 struct drm_device *dev = crtc->dev;
5390 struct drm_i915_private *dev_priv = dev->dev_private;
5391 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5392 int pipe = intel_crtc->pipe;
5393 int plane = intel_crtc->plane;
5394 int num_connectors = 0;
ed7ef439 5395 bool is_dp = false, is_cpu_edp = false;
09b4ddf9 5396 struct intel_encoder *encoder;
09b4ddf9
PZ
5397 int ret;
5398 bool dither;
5399
5400 for_each_encoder_on_crtc(dev, crtc, encoder) {
5401 switch (encoder->type) {
09b4ddf9
PZ
5402 case INTEL_OUTPUT_DISPLAYPORT:
5403 is_dp = true;
5404 break;
5405 case INTEL_OUTPUT_EDP:
5406 is_dp = true;
5407 if (!intel_encoder_is_pch_edp(&encoder->base))
5408 is_cpu_edp = true;
5409 break;
5410 }
5411
5412 num_connectors++;
5413 }
5414
a5c961d1
PZ
5415 if (is_cpu_edp)
5416 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5417 else
5418 intel_crtc->cpu_transcoder = pipe;
5419
5dc5298b
PZ
5420 /* We are not sure yet this won't happen. */
5421 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5422 INTEL_PCH_TYPE(dev));
5423
5424 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5425 num_connectors, pipe_name(pipe));
5426
702e7a56 5427 WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
1ce42920
PZ
5428 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5429
5430 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5431
6441ab5f
PZ
5432 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5433 return -EINVAL;
5434
09b4ddf9
PZ
5435 /* Ensure that the cursor is valid for the new mode before changing... */
5436 intel_crtc_update_cursor(crtc, true);
5437
5438 /* determine panel color depth */
c8241969
JN
5439 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5440 adjusted_mode);
09b4ddf9 5441
09b4ddf9
PZ
5442 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5443 drm_mode_debug_printmodeline(mode);
5444
ed7ef439 5445 if (is_dp && !is_cpu_edp)
09b4ddf9 5446 intel_dp_set_m_n(crtc, mode, adjusted_mode);
09b4ddf9
PZ
5447
5448 intel_crtc->lowfreq_avail = false;
09b4ddf9
PZ
5449
5450 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5451
1eb8dfec
PZ
5452 if (!is_dp || is_cpu_edp)
5453 ironlake_set_m_n(crtc, mode, adjusted_mode);
09b4ddf9 5454
ee2b0b38 5455 haswell_set_pipeconf(crtc, adjusted_mode, dither);
09b4ddf9 5456
09b4ddf9
PZ
5457 /* Set up the display plane register */
5458 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5459 POSTING_READ(DSPCNTR(plane));
5460
5461 ret = intel_pipe_set_base(crtc, x, y, fb);
5462
5463 intel_update_watermarks(dev);
5464
5465 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5466
1f803ee5 5467 return ret;
79e53945
JB
5468}
5469
f564048e
EA
5470static int intel_crtc_mode_set(struct drm_crtc *crtc,
5471 struct drm_display_mode *mode,
5472 struct drm_display_mode *adjusted_mode,
5473 int x, int y,
94352cf9 5474 struct drm_framebuffer *fb)
f564048e
EA
5475{
5476 struct drm_device *dev = crtc->dev;
5477 struct drm_i915_private *dev_priv = dev->dev_private;
9256aa19
DV
5478 struct drm_encoder_helper_funcs *encoder_funcs;
5479 struct intel_encoder *encoder;
0b701d27
EA
5480 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5481 int pipe = intel_crtc->pipe;
f564048e
EA
5482 int ret;
5483
0b701d27 5484 drm_vblank_pre_modeset(dev, pipe);
7662c8bd 5485
f564048e 5486 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
94352cf9 5487 x, y, fb);
79e53945 5488 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 5489
9256aa19
DV
5490 if (ret != 0)
5491 return ret;
5492
5493 for_each_encoder_on_crtc(dev, crtc, encoder) {
5494 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
5495 encoder->base.base.id,
5496 drm_get_encoder_name(&encoder->base),
5497 mode->base.id, mode->name);
5498 encoder_funcs = encoder->base.helper_private;
5499 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5500 }
5501
5502 return 0;
79e53945
JB
5503}
5504
3a9627f4
WF
5505static bool intel_eld_uptodate(struct drm_connector *connector,
5506 int reg_eldv, uint32_t bits_eldv,
5507 int reg_elda, uint32_t bits_elda,
5508 int reg_edid)
5509{
5510 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5511 uint8_t *eld = connector->eld;
5512 uint32_t i;
5513
5514 i = I915_READ(reg_eldv);
5515 i &= bits_eldv;
5516
5517 if (!eld[0])
5518 return !i;
5519
5520 if (!i)
5521 return false;
5522
5523 i = I915_READ(reg_elda);
5524 i &= ~bits_elda;
5525 I915_WRITE(reg_elda, i);
5526
5527 for (i = 0; i < eld[2]; i++)
5528 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
5529 return false;
5530
5531 return true;
5532}
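/*
 * Note (illustrative): the hardware ELD buffer is addressed a dword at
 * a time, which is why the loop above compares eld[] in 32-bit chunks;
 * eld[2] is the baseline-ELD length field of the ELD header and is
 * counted in dwords.
 */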
5533
e0dac65e
WF
5534static void g4x_write_eld(struct drm_connector *connector,
5535 struct drm_crtc *crtc)
5536{
5537 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5538 uint8_t *eld = connector->eld;
5539 uint32_t eldv;
5540 uint32_t len;
5541 uint32_t i;
5542
5543 i = I915_READ(G4X_AUD_VID_DID);
5544
5545 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
5546 eldv = G4X_ELDV_DEVCL_DEVBLC;
5547 else
5548 eldv = G4X_ELDV_DEVCTG;
5549
3a9627f4
WF
5550 if (intel_eld_uptodate(connector,
5551 G4X_AUD_CNTL_ST, eldv,
5552 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
5553 G4X_HDMIW_HDMIEDID))
5554 return;
5555
e0dac65e
WF
5556 i = I915_READ(G4X_AUD_CNTL_ST);
5557 i &= ~(eldv | G4X_ELD_ADDR);
5558 len = (i >> 9) & 0x1f; /* ELD buffer size */
5559 I915_WRITE(G4X_AUD_CNTL_ST, i);
5560
5561 if (!eld[0])
5562 return;
5563
5564 len = min_t(uint8_t, eld[2], len);
5565 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5566 for (i = 0; i < len; i++)
5567 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
5568
5569 i = I915_READ(G4X_AUD_CNTL_ST);
5570 i |= eldv;
5571 I915_WRITE(G4X_AUD_CNTL_ST, i);
5572}
5573
83358c85
WX
5574static void haswell_write_eld(struct drm_connector *connector,
5575 struct drm_crtc *crtc)
5576{
5577 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5578 uint8_t *eld = connector->eld;
5579 struct drm_device *dev = crtc->dev;
5580 uint32_t eldv;
5581 uint32_t i;
5582 int len;
5583 int pipe = to_intel_crtc(crtc)->pipe;
5584 int tmp;
5585
5586 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
5587 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
5588 int aud_config = HSW_AUD_CFG(pipe);
5589 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
5590
5591
5592 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
5593
5594 /* Audio output enable */
5595 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
5596 tmp = I915_READ(aud_cntrl_st2);
5597 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
5598 I915_WRITE(aud_cntrl_st2, tmp);
5599
5600 /* Wait for 1 vertical blank */
5601 intel_wait_for_vblank(dev, pipe);
5602
5603 /* Set ELD valid state */
5604 tmp = I915_READ(aud_cntrl_st2);
5605 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
5606 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
5607 I915_WRITE(aud_cntrl_st2, tmp);
5608 tmp = I915_READ(aud_cntrl_st2);
5609 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
5610
5611 /* Enable HDMI mode */
5612 tmp = I915_READ(aud_config);
5613 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
5614 /* clear N_programming_enable and N_value_index */
5615 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
5616 I915_WRITE(aud_config, tmp);
5617
5618 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5619
5620 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
5621
5622 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5623 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5624 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5625 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5626 } else
5627 I915_WRITE(aud_config, 0);
5628
5629 if (intel_eld_uptodate(connector,
5630 aud_cntrl_st2, eldv,
5631 aud_cntl_st, IBX_ELD_ADDRESS,
5632 hdmiw_hdmiedid))
5633 return;
5634
5635 i = I915_READ(aud_cntrl_st2);
5636 i &= ~eldv;
5637 I915_WRITE(aud_cntrl_st2, i);
5638
5639 if (!eld[0])
5640 return;
5641
5642 i = I915_READ(aud_cntl_st);
5643 i &= ~IBX_ELD_ADDRESS;
5644 I915_WRITE(aud_cntl_st, i);
5645 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
5646 DRM_DEBUG_DRIVER("port num:%d\n", i);
5647
5648 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5649 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5650 for (i = 0; i < len; i++)
5651 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5652
5653 i = I915_READ(aud_cntrl_st2);
5654 i |= eldv;
5655 I915_WRITE(aud_cntrl_st2, i);
5656
5657}
5658
e0dac65e
WF
5659static void ironlake_write_eld(struct drm_connector *connector,
5660 struct drm_crtc *crtc)
5661{
5662 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5663 uint8_t *eld = connector->eld;
5664 uint32_t eldv;
5665 uint32_t i;
5666 int len;
5667 int hdmiw_hdmiedid;
b6daa025 5668 int aud_config;
e0dac65e
WF
5669 int aud_cntl_st;
5670 int aud_cntrl_st2;
9b138a83 5671 int pipe = to_intel_crtc(crtc)->pipe;
e0dac65e 5672
b3f33cbf 5673 if (HAS_PCH_IBX(connector->dev)) {
9b138a83
WX
5674 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
5675 aud_config = IBX_AUD_CFG(pipe);
5676 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
1202b4c6 5677 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
e0dac65e 5678 } else {
9b138a83
WX
5679 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
5680 aud_config = CPT_AUD_CFG(pipe);
5681 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
1202b4c6 5682 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
e0dac65e
WF
5683 }
5684
9b138a83 5685 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
e0dac65e
WF
5686
5687 i = I915_READ(aud_cntl_st);
9b138a83 5688 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
e0dac65e
WF
5689 if (!i) {
5690 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5691 /* operate blindly on all ports */
1202b4c6
WF
5692 eldv = IBX_ELD_VALIDB;
5693 eldv |= IBX_ELD_VALIDB << 4;
5694 eldv |= IBX_ELD_VALIDB << 8;
e0dac65e
WF
5695 } else {
5696 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
1202b4c6 5697 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
e0dac65e
WF
5698 }
5699
3a9627f4
WF
5700 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5701 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5702 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
b6daa025
WF
5703 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5704 } else
5705 I915_WRITE(aud_config, 0);
e0dac65e 5706
3a9627f4
WF
5707 if (intel_eld_uptodate(connector,
5708 aud_cntrl_st2, eldv,
5709 aud_cntl_st, IBX_ELD_ADDRESS,
5710 hdmiw_hdmiedid))
5711 return;
5712
e0dac65e
WF
5713 i = I915_READ(aud_cntrl_st2);
5714 i &= ~eldv;
5715 I915_WRITE(aud_cntrl_st2, i);
5716
5717 if (!eld[0])
5718 return;
5719
e0dac65e 5720 i = I915_READ(aud_cntl_st);
1202b4c6 5721 i &= ~IBX_ELD_ADDRESS;
e0dac65e
WF
5722 I915_WRITE(aud_cntl_st, i);
5723
5724 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5725 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5726 for (i = 0; i < len; i++)
5727 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5728
5729 i = I915_READ(aud_cntrl_st2);
5730 i |= eldv;
5731 I915_WRITE(aud_cntrl_st2, i);
5732}
5733
5734void intel_write_eld(struct drm_encoder *encoder,
5735 struct drm_display_mode *mode)
5736{
5737 struct drm_crtc *crtc = encoder->crtc;
5738 struct drm_connector *connector;
5739 struct drm_device *dev = encoder->dev;
5740 struct drm_i915_private *dev_priv = dev->dev_private;
5741
5742 connector = drm_select_eld(encoder, mode);
5743 if (!connector)
5744 return;
5745
5746 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5747 connector->base.id,
5748 drm_get_connector_name(connector),
5749 connector->encoder->base.id,
5750 drm_get_encoder_name(connector->encoder));
5751
5752 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
5753
5754 if (dev_priv->display.write_eld)
5755 dev_priv->display.write_eld(connector, crtc);
5756}
5757
79e53945
JB
5758/** Loads the palette/gamma unit for the CRTC with the prepared values */
5759void intel_crtc_load_lut(struct drm_crtc *crtc)
5760{
5761 struct drm_device *dev = crtc->dev;
5762 struct drm_i915_private *dev_priv = dev->dev_private;
5763 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9db4a9c7 5764 int palreg = PALETTE(intel_crtc->pipe);
79e53945
JB
5765 int i;
5766
5767 /* The clocks have to be on to load the palette. */
aed3f09d 5768 if (!crtc->enabled || !intel_crtc->active)
79e53945
JB
5769 return;
5770
f2b115e6 5771 /* use legacy palette for Ironlake */
bad720ff 5772 if (HAS_PCH_SPLIT(dev))
9db4a9c7 5773 palreg = LGC_PALETTE(intel_crtc->pipe);
2c07245f 5774
79e53945
JB
5775 for (i = 0; i < 256; i++) {
5776 I915_WRITE(palreg + 4 * i,
5777 (intel_crtc->lut_r[i] << 16) |
5778 (intel_crtc->lut_g[i] << 8) |
5779 intel_crtc->lut_b[i]);
5780 }
5781}
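/*
 * Illustrative: each palette entry packs the 8-bit components as
 * 0x00RRGGBB, so lut_r = 0x12, lut_g = 0x34, lut_b = 0x56 is written
 * out as 0x00123456.
 */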
5782
560b85bb
CW
5783static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
5784{
5785 struct drm_device *dev = crtc->dev;
5786 struct drm_i915_private *dev_priv = dev->dev_private;
5787 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5788 bool visible = base != 0;
5789 u32 cntl;
5790
5791 if (intel_crtc->cursor_visible == visible)
5792 return;
5793
9db4a9c7 5794 cntl = I915_READ(_CURACNTR);
560b85bb
CW
5795 if (visible) {
5796 /* On these chipsets we can only modify the base whilst
5797 * the cursor is disabled.
5798 */
9db4a9c7 5799 I915_WRITE(_CURABASE, base);
560b85bb
CW
5800
5801 cntl &= ~(CURSOR_FORMAT_MASK);
5802 /* XXX width must be 64, stride 256 => 0x00 << 28 */
5803 cntl |= CURSOR_ENABLE |
5804 CURSOR_GAMMA_ENABLE |
5805 CURSOR_FORMAT_ARGB;
5806 } else
5807 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
9db4a9c7 5808 I915_WRITE(_CURACNTR, cntl);
560b85bb
CW
5809
5810 intel_crtc->cursor_visible = visible;
5811}
5812
5813static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5814{
5815 struct drm_device *dev = crtc->dev;
5816 struct drm_i915_private *dev_priv = dev->dev_private;
5817 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5818 int pipe = intel_crtc->pipe;
5819 bool visible = base != 0;
5820
5821 if (intel_crtc->cursor_visible != visible) {
548f245b 5822 uint32_t cntl = I915_READ(CURCNTR(pipe));
560b85bb
CW
5823 if (base) {
5824 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5825 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5826 cntl |= pipe << 28; /* Connect to correct pipe */
5827 } else {
5828 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5829 cntl |= CURSOR_MODE_DISABLE;
5830 }
9db4a9c7 5831 I915_WRITE(CURCNTR(pipe), cntl);
560b85bb
CW
5832
5833 intel_crtc->cursor_visible = visible;
5834 }
5835 /* and commit changes on next vblank */
9db4a9c7 5836 I915_WRITE(CURBASE(pipe), base);
560b85bb
CW
5837}
5838
65a21cd6
JB
5839static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5840{
5841 struct drm_device *dev = crtc->dev;
5842 struct drm_i915_private *dev_priv = dev->dev_private;
5843 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5844 int pipe = intel_crtc->pipe;
5845 bool visible = base != 0;
5846
5847 if (intel_crtc->cursor_visible != visible) {
5848 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5849 if (base) {
5850 cntl &= ~CURSOR_MODE;
5851 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5852 } else {
5853 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5854 cntl |= CURSOR_MODE_DISABLE;
5855 }
5856 I915_WRITE(CURCNTR_IVB(pipe), cntl);
5857
5858 intel_crtc->cursor_visible = visible;
5859 }
5860 /* and commit changes on next vblank */
5861 I915_WRITE(CURBASE_IVB(pipe), base);
5862}
5863
cda4b7d3 5864/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6b383a7f
CW
5865static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5866 bool on)
cda4b7d3
CW
5867{
5868 struct drm_device *dev = crtc->dev;
5869 struct drm_i915_private *dev_priv = dev->dev_private;
5870 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5871 int pipe = intel_crtc->pipe;
5872 int x = intel_crtc->cursor_x;
5873 int y = intel_crtc->cursor_y;
560b85bb 5874 u32 base, pos;
cda4b7d3
CW
5875 bool visible;
5876
5877 pos = 0;
5878
6b383a7f 5879 if (on && crtc->enabled && crtc->fb) {
cda4b7d3
CW
5880 base = intel_crtc->cursor_addr;
5881 if (x > (int) crtc->fb->width)
5882 base = 0;
5883
5884 if (y > (int) crtc->fb->height)
5885 base = 0;
5886 } else
5887 base = 0;
5888
5889 if (x < 0) {
5890 if (x + intel_crtc->cursor_width < 0)
5891 base = 0;
5892
5893 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5894 x = -x;
5895 }
5896 pos |= x << CURSOR_X_SHIFT;
5897
5898 if (y < 0) {
5899 if (y + intel_crtc->cursor_height < 0)
5900 base = 0;
5901
5902 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5903 y = -y;
5904 }
5905 pos |= y << CURSOR_Y_SHIFT;
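/* Illustrative: CURPOS takes sign/magnitude coordinates, so e.g.
 * x = -10 is written as CURSOR_POS_SIGN plus a magnitude of 10 in the
 * X field; base is only cleared once the cursor is entirely off
 * screen. */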
5906
5907 visible = base != 0;
560b85bb 5908 if (!visible && !intel_crtc->cursor_visible)
cda4b7d3
CW
5909 return;
5910
0cd83aa9 5911 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
65a21cd6
JB
5912 I915_WRITE(CURPOS_IVB(pipe), pos);
5913 ivb_update_cursor(crtc, base);
5914 } else {
5915 I915_WRITE(CURPOS(pipe), pos);
5916 if (IS_845G(dev) || IS_I865G(dev))
5917 i845_update_cursor(crtc, base);
5918 else
5919 i9xx_update_cursor(crtc, base);
5920 }
cda4b7d3
CW
5921}
5922
79e53945 5923static int intel_crtc_cursor_set(struct drm_crtc *crtc,
05394f39 5924 struct drm_file *file,
79e53945
JB
5925 uint32_t handle,
5926 uint32_t width, uint32_t height)
5927{
5928 struct drm_device *dev = crtc->dev;
5929 struct drm_i915_private *dev_priv = dev->dev_private;
5930 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 5931 struct drm_i915_gem_object *obj;
cda4b7d3 5932 uint32_t addr;
3f8bc370 5933 int ret;
79e53945 5934
79e53945
JB
5935 /* if we want to turn off the cursor, ignore width and height */
5936 if (!handle) {
28c97730 5937 DRM_DEBUG_KMS("cursor off\n");
3f8bc370 5938 addr = 0;
05394f39 5939 obj = NULL;
5004417d 5940 mutex_lock(&dev->struct_mutex);
3f8bc370 5941 goto finish;
79e53945
JB
5942 }
5943
5944 /* Currently we only support 64x64 cursors */
5945 if (width != 64 || height != 64) {
5946 DRM_ERROR("we currently only support 64x64 cursors\n");
5947 return -EINVAL;
5948 }
5949
05394f39 5950 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 5951 if (&obj->base == NULL)
79e53945
JB
5952 return -ENOENT;
5953
05394f39 5954 if (obj->base.size < width * height * 4) {
79e53945 5955 DRM_ERROR("buffer is too small\n");
34b8686e
DA
5956 ret = -ENOMEM;
5957 goto fail;
79e53945
JB
5958 }
5959
71acb5eb 5960 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 5961 mutex_lock(&dev->struct_mutex);
b295d1b6 5962 if (!dev_priv->info->cursor_needs_physical) {
d9e86c0e
CW
5963 if (obj->tiling_mode) {
5964 DRM_ERROR("cursor cannot be tiled\n");
5965 ret = -EINVAL;
5966 goto fail_locked;
5967 }
5968
2da3b9b9 5969 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
e7b526bb
CW
5970 if (ret) {
5971 DRM_ERROR("failed to move cursor bo into the GTT\n");
2da3b9b9 5972 goto fail_locked;
e7b526bb
CW
5973 }
5974
d9e86c0e
CW
5975 ret = i915_gem_object_put_fence(obj);
5976 if (ret) {
2da3b9b9 5977 DRM_ERROR("failed to release fence for cursor\n");
d9e86c0e
CW
5978 goto fail_unpin;
5979 }
5980
05394f39 5981 addr = obj->gtt_offset;
71acb5eb 5982 } else {
6eeefaf3 5983 int align = IS_I830(dev) ? 16 * 1024 : 256;
05394f39 5984 ret = i915_gem_attach_phys_object(dev, obj,
6eeefaf3
CW
5985 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5986 align);
71acb5eb
DA
5987 if (ret) {
5988 DRM_ERROR("failed to attach phys object\n");
7f9872e0 5989 goto fail_locked;
71acb5eb 5990 }
05394f39 5991 addr = obj->phys_obj->handle->busaddr;
3f8bc370
KH
5992 }
5993
a6c45cf0 5994 if (IS_GEN2(dev))
14b60391
JB
5995 I915_WRITE(CURSIZE, (height << 12) | width);
5996
3f8bc370 5997 finish:
3f8bc370 5998 if (intel_crtc->cursor_bo) {
b295d1b6 5999 if (dev_priv->info->cursor_needs_physical) {
05394f39 6000 if (intel_crtc->cursor_bo != obj)
71acb5eb
DA
6001 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6002 } else
6003 i915_gem_object_unpin(intel_crtc->cursor_bo);
05394f39 6004 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
3f8bc370 6005 }
80824003 6006
7f9872e0 6007 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
6008
6009 intel_crtc->cursor_addr = addr;
05394f39 6010 intel_crtc->cursor_bo = obj;
cda4b7d3
CW
6011 intel_crtc->cursor_width = width;
6012 intel_crtc->cursor_height = height;
6013
6b383a7f 6014 intel_crtc_update_cursor(crtc, true);
3f8bc370 6015
79e53945 6016 return 0;
e7b526bb 6017fail_unpin:
05394f39 6018 i915_gem_object_unpin(obj);
7f9872e0 6019fail_locked:
34b8686e 6020 mutex_unlock(&dev->struct_mutex);
bc9025bd 6021fail:
05394f39 6022 drm_gem_object_unreference_unlocked(&obj->base);
34b8686e 6023 return ret;
79e53945
JB
6024}
6025
6026static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6027{
79e53945 6028 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 6029
cda4b7d3
CW
6030 intel_crtc->cursor_x = x;
6031 intel_crtc->cursor_y = y;
652c393a 6032
6b383a7f 6033 intel_crtc_update_cursor(crtc, true);
79e53945
JB
6034
6035 return 0;
6036}
6037
6038/** Sets the color ramps on behalf of RandR */
6039void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6040 u16 blue, int regno)
6041{
6042 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6043
6044 intel_crtc->lut_r[regno] = red >> 8;
6045 intel_crtc->lut_g[regno] = green >> 8;
6046 intel_crtc->lut_b[regno] = blue >> 8;
6047}
6048
b8c00ac5
DA
6049void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6050 u16 *blue, int regno)
6051{
6052 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6053
6054 *red = intel_crtc->lut_r[regno] << 8;
6055 *green = intel_crtc->lut_g[regno] << 8;
6056 *blue = intel_crtc->lut_b[regno] << 8;
6057}
6058
79e53945 6059static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 6060 u16 *blue, uint32_t start, uint32_t size)
79e53945 6061{
7203425a 6062 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 6063 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 6064
7203425a 6065 for (i = start; i < end; i++) {
79e53945
JB
6066 intel_crtc->lut_r[i] = red[i] >> 8;
6067 intel_crtc->lut_g[i] = green[i] >> 8;
6068 intel_crtc->lut_b[i] = blue[i] >> 8;
6069 }
6070
6071 intel_crtc_load_lut(crtc);
6072}
6073
6074/**
6075 * Get a pipe with a simple mode set on it for doing load-based monitor
6076 * detection.
6077 *
6078 * It will be up to the load-detect code to adjust the pipe as appropriate for
c751ce4f 6079 * its requirements. The pipe will be connected to no other encoders.
79e53945 6080 *
c751ce4f 6081 * Currently this code will only succeed if there is a pipe with no encoders
79e53945
JB
6082 * configured for it. In the future, it could choose to temporarily disable
6083 * some outputs to free up a pipe for its use.
6084 *
6085 * \return crtc, or NULL if no pipes are available.
6086 */
6087
6088/* VESA 640x480x72Hz mode to set on the pipe */
6089static struct drm_display_mode load_detect_mode = {
6090 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6091 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6092};
6093
d2dff872
CW
6094static struct drm_framebuffer *
6095intel_framebuffer_create(struct drm_device *dev,
308e5bcb 6096 struct drm_mode_fb_cmd2 *mode_cmd,
d2dff872
CW
6097 struct drm_i915_gem_object *obj)
6098{
6099 struct intel_framebuffer *intel_fb;
6100 int ret;
6101
6102 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6103 if (!intel_fb) {
6104 drm_gem_object_unreference_unlocked(&obj->base);
6105 return ERR_PTR(-ENOMEM);
6106 }
6107
6108 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6109 if (ret) {
6110 drm_gem_object_unreference_unlocked(&obj->base);
6111 kfree(intel_fb);
6112 return ERR_PTR(ret);
6113 }
6114
6115 return &intel_fb->base;
6116}
6117
6118static u32
6119intel_framebuffer_pitch_for_width(int width, int bpp)
6120{
6121 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6122 return ALIGN(pitch, 64);
6123}
6124
6125static u32
6126intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6127{
6128 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6129 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6130}
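/*
 * Worked example (illustrative): for the 640x480 load-detect mode at
 * 32 bpp, intel_framebuffer_pitch_for_width() yields
 * ALIGN(640 * 32 / 8, 64) = 2560 bytes per row, and the object size
 * becomes ALIGN(2560 * 480, PAGE_SIZE) = 1228800 bytes, which is
 * already a multiple of the 4 KiB page size.
 */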
6131
6132static struct drm_framebuffer *
6133intel_framebuffer_create_for_mode(struct drm_device *dev,
6134 struct drm_display_mode *mode,
6135 int depth, int bpp)
6136{
6137 struct drm_i915_gem_object *obj;
0fed39bd 6138 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
d2dff872
CW
6139
6140 obj = i915_gem_alloc_object(dev,
6141 intel_framebuffer_size_for_mode(mode, bpp));
6142 if (obj == NULL)
6143 return ERR_PTR(-ENOMEM);
6144
6145 mode_cmd.width = mode->hdisplay;
6146 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
6147 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6148 bpp);
5ca0c34a 6149 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872
CW
6150
6151 return intel_framebuffer_create(dev, &mode_cmd, obj);
6152}
6153
6154static struct drm_framebuffer *
6155mode_fits_in_fbdev(struct drm_device *dev,
6156 struct drm_display_mode *mode)
6157{
6158 struct drm_i915_private *dev_priv = dev->dev_private;
6159 struct drm_i915_gem_object *obj;
6160 struct drm_framebuffer *fb;
6161
6162 if (dev_priv->fbdev == NULL)
6163 return NULL;
6164
6165 obj = dev_priv->fbdev->ifb.obj;
6166 if (obj == NULL)
6167 return NULL;
6168
6169 fb = &dev_priv->fbdev->ifb.base;
01f2c773
VS
6170 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6171 fb->bits_per_pixel))
d2dff872
CW
6172 return NULL;
6173
01f2c773 6174 if (obj->base.size < mode->vdisplay * fb->pitches[0])
d2dff872
CW
6175 return NULL;
6176
6177 return fb;
6178}
6179
d2434ab7 6180bool intel_get_load_detect_pipe(struct drm_connector *connector,
7173188d 6181 struct drm_display_mode *mode,
8261b191 6182 struct intel_load_detect_pipe *old)
79e53945
JB
6183{
6184 struct intel_crtc *intel_crtc;
d2434ab7
DV
6185 struct intel_encoder *intel_encoder =
6186 intel_attached_encoder(connector);
79e53945 6187 struct drm_crtc *possible_crtc;
4ef69c7a 6188 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
6189 struct drm_crtc *crtc = NULL;
6190 struct drm_device *dev = encoder->dev;
94352cf9 6191 struct drm_framebuffer *fb;
79e53945
JB
6192 int i = -1;
6193
d2dff872
CW
6194 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6195 connector->base.id, drm_get_connector_name(connector),
6196 encoder->base.id, drm_get_encoder_name(encoder));
6197
79e53945
JB
6198 /*
6199 * Algorithm gets a little messy:
7a5e4805 6200 *
79e53945
JB
6201 * - if the connector already has an assigned crtc, use it (but make
6202 * sure it's on first)
7a5e4805 6203 *
79e53945
JB
6204 * - try to find the first unused crtc that can drive this connector,
6205 * and use that if we find one
79e53945
JB
6206 */
6207
6208 /* See if we already have a CRTC for this connector */
6209 if (encoder->crtc) {
6210 crtc = encoder->crtc;
8261b191 6211
24218aac 6212 old->dpms_mode = connector->dpms;
8261b191
CW
6213 old->load_detect_temp = false;
6214
6215 /* Make sure the crtc and connector are running */
24218aac
DV
6216 if (connector->dpms != DRM_MODE_DPMS_ON)
6217 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8261b191 6218
7173188d 6219 return true;
79e53945
JB
6220 }
6221
6222 /* Find an unused one (if possible) */
6223 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6224 i++;
6225 if (!(encoder->possible_crtcs & (1 << i)))
6226 continue;
6227 if (!possible_crtc->enabled) {
6228 crtc = possible_crtc;
6229 break;
6230 }
79e53945
JB
6231 }
6232
6233 /*
6234 * If we didn't find an unused CRTC, don't use any.
6235 */
6236 if (!crtc) {
7173188d
CW
6237 DRM_DEBUG_KMS("no pipe available for load-detect\n");
6238 return false;
79e53945
JB
6239 }
6240
fc303101
DV
6241 intel_encoder->new_crtc = to_intel_crtc(crtc);
6242 to_intel_connector(connector)->new_encoder = intel_encoder;
79e53945
JB
6243
6244 intel_crtc = to_intel_crtc(crtc);
24218aac 6245 old->dpms_mode = connector->dpms;
8261b191 6246 old->load_detect_temp = true;
d2dff872 6247 old->release_fb = NULL;
79e53945 6248
6492711d
CW
6249 if (!mode)
6250 mode = &load_detect_mode;
79e53945 6251
d2dff872
CW
6252 /* We need a framebuffer large enough to accommodate all accesses
6253 * that the plane may generate whilst we perform load detection.
6254 * We can not rely on the fbcon either being present (we get called
6255 * during its initialisation to detect all boot displays, or it may
6256 * not even exist) or that it is large enough to satisfy the
6257 * requested mode.
6258 */
94352cf9
DV
6259 fb = mode_fits_in_fbdev(dev, mode);
6260 if (fb == NULL) {
d2dff872 6261 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
94352cf9
DV
6262 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6263 old->release_fb = fb;
d2dff872
CW
6264 } else
6265 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
94352cf9 6266 if (IS_ERR(fb)) {
d2dff872 6267 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
0e8b3d3e 6268 return false;
79e53945 6269 }
79e53945 6270
94352cf9 6271 if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
6492711d 6272 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
d2dff872
CW
6273 if (old->release_fb)
6274 old->release_fb->funcs->destroy(old->release_fb);
0e8b3d3e 6275 return false;
79e53945 6276 }
7173188d 6277
79e53945 6278 /* let the connector get through one full cycle before testing */
9d0498a2 6279 intel_wait_for_vblank(dev, intel_crtc->pipe);
7173188d 6280 return true;
79e53945
JB
6281}
6282
d2434ab7 6283void intel_release_load_detect_pipe(struct drm_connector *connector,
8261b191 6284 struct intel_load_detect_pipe *old)
79e53945 6285{
d2434ab7
DV
6286 struct intel_encoder *intel_encoder =
6287 intel_attached_encoder(connector);
4ef69c7a 6288 struct drm_encoder *encoder = &intel_encoder->base;
79e53945 6289
d2dff872
CW
6290 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6291 connector->base.id, drm_get_connector_name(connector),
6292 encoder->base.id, drm_get_encoder_name(encoder));
6293
8261b191 6294 if (old->load_detect_temp) {
fc303101
DV
6295 struct drm_crtc *crtc = encoder->crtc;
6296
6297 to_intel_connector(connector)->new_encoder = NULL;
6298 intel_encoder->new_crtc = NULL;
6299 intel_set_mode(crtc, NULL, 0, 0, NULL);
d2dff872
CW
6300
6301 if (old->release_fb)
6302 old->release_fb->funcs->destroy(old->release_fb);
6303
0622a53c 6304 return;
79e53945
JB
6305 }
6306
c751ce4f 6307 /* Switch crtc and encoder back off if necessary */
24218aac
DV
6308 if (old->dpms_mode != DRM_MODE_DPMS_ON)
6309 connector->funcs->dpms(connector, old->dpms_mode);
79e53945
JB
6310}
6311
6312/* Returns the clock of the currently programmed mode of the given pipe. */
6313static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6314{
6315 struct drm_i915_private *dev_priv = dev->dev_private;
6316 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6317 int pipe = intel_crtc->pipe;
548f245b 6318 u32 dpll = I915_READ(DPLL(pipe));
79e53945
JB
6319 u32 fp;
6320 intel_clock_t clock;
6321
6322 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
39adb7a5 6323 fp = I915_READ(FP0(pipe));
79e53945 6324 else
39adb7a5 6325 fp = I915_READ(FP1(pipe));
79e53945
JB
6326
6327 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
6328 if (IS_PINEVIEW(dev)) {
6329 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6330 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
6331 } else {
6332 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6333 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6334 }
6335
a6c45cf0 6336 if (!IS_GEN2(dev)) {
f2b115e6
AJ
6337 if (IS_PINEVIEW(dev))
6338 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6339 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
6340 else
6341 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
6342 DPLL_FPA01_P1_POST_DIV_SHIFT);
6343
6344 switch (dpll & DPLL_MODE_MASK) {
6345 case DPLLB_MODE_DAC_SERIAL:
6346 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6347 5 : 10;
6348 break;
6349 case DPLLB_MODE_LVDS:
6350 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6351 7 : 14;
6352 break;
6353 default:
28c97730 6354 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945
JB
6355 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6356 return 0;
6357 }
6358
6359 /* XXX: Handle the 100MHz refclk */
2177832f 6360 intel_clock(dev, 96000, &clock);
79e53945
JB
6361 } else {
6362 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6363
6364 if (is_lvds) {
6365 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6366 DPLL_FPA01_P1_POST_DIV_SHIFT);
6367 clock.p2 = 14;
6368
6369 if ((dpll & PLL_REF_INPUT_MASK) ==
6370 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6371 /* XXX: might not be 66MHz */
2177832f 6372 intel_clock(dev, 66000, &clock);
79e53945 6373 } else
2177832f 6374 intel_clock(dev, 48000, &clock);
79e53945
JB
6375 } else {
6376 if (dpll & PLL_P1_DIVIDE_BY_TWO)
6377 clock.p1 = 2;
6378 else {
6379 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6380 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6381 }
6382 if (dpll & PLL_P2_DIVIDE_BY_4)
6383 clock.p2 = 4;
6384 else
6385 clock.p2 = 2;
6386
2177832f 6387 intel_clock(dev, 48000, &clock);
79e53945
JB
6388 }
6389 }
6390
6391 /* XXX: It would be nice to validate the clocks, but we can't reuse
6392 * i830PllIsValid() because it relies on the xf86_config connector
6393 * configuration being accurate, which it isn't necessarily.
6394 */
6395
6396 return clock.dot;
6397}
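/*
 * Illustrative note: the divisors decoded above are handed to
 * intel_clock(), which (roughly) derives the VCO from the reference
 * clock, m and n and then divides by p1 * p2 to obtain the dot clock;
 * the exact offsets applied to the raw register fields are
 * platform-specific. The reference clocks used here (96/66/48 MHz) are
 * hard-coded assumptions, as the XXX comments above note.
 */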
6398
6399/** Returns the currently programmed mode of the given pipe. */
6400struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6401 struct drm_crtc *crtc)
6402{
548f245b 6403 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 6404 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
fe2b8f9d 6405 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
79e53945 6406 struct drm_display_mode *mode;
fe2b8f9d
PZ
6407 int htot = I915_READ(HTOTAL(cpu_transcoder));
6408 int hsync = I915_READ(HSYNC(cpu_transcoder));
6409 int vtot = I915_READ(VTOTAL(cpu_transcoder));
6410 int vsync = I915_READ(VSYNC(cpu_transcoder));
79e53945
JB
6411
6412 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6413 if (!mode)
6414 return NULL;
6415
6416 mode->clock = intel_crtc_clock_get(dev, crtc);
6417 mode->hdisplay = (htot & 0xffff) + 1;
6418 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6419 mode->hsync_start = (hsync & 0xffff) + 1;
6420 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6421 mode->vdisplay = (vtot & 0xffff) + 1;
6422 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6423 mode->vsync_start = (vsync & 0xffff) + 1;
6424 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6425
6426 drm_mode_set_name(mode);
79e53945
JB
6427
6428 return mode;
6429}
6430
3dec0095 6431static void intel_increase_pllclock(struct drm_crtc *crtc)
652c393a
JB
6432{
6433 struct drm_device *dev = crtc->dev;
6434 drm_i915_private_t *dev_priv = dev->dev_private;
6435 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6436 int pipe = intel_crtc->pipe;
dbdc6479
JB
6437 int dpll_reg = DPLL(pipe);
6438 int dpll;
652c393a 6439
bad720ff 6440 if (HAS_PCH_SPLIT(dev))
652c393a
JB
6441 return;
6442
6443 if (!dev_priv->lvds_downclock_avail)
6444 return;
6445
dbdc6479 6446 dpll = I915_READ(dpll_reg);
652c393a 6447 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 6448 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a 6449
8ac5a6d5 6450 assert_panel_unlocked(dev_priv, pipe);
652c393a
JB
6451
6452 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6453 I915_WRITE(dpll_reg, dpll);
9d0498a2 6454 intel_wait_for_vblank(dev, pipe);
dbdc6479 6455
652c393a
JB
6456 dpll = I915_READ(dpll_reg);
6457 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 6458 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a 6459 }
652c393a
JB
6460}
6461
6462static void intel_decrease_pllclock(struct drm_crtc *crtc)
6463{
6464 struct drm_device *dev = crtc->dev;
6465 drm_i915_private_t *dev_priv = dev->dev_private;
6466 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 6467
bad720ff 6468 if (HAS_PCH_SPLIT(dev))
652c393a
JB
6469 return;
6470
6471 if (!dev_priv->lvds_downclock_avail)
6472 return;
6473
6474 /*
6475 * Since this is called by a timer, we should never get here in
6476 * the manual case.
6477 */
6478 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
dc257cf1
DV
6479 int pipe = intel_crtc->pipe;
6480 int dpll_reg = DPLL(pipe);
6481 int dpll;
f6e5b160 6482
44d98a61 6483 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a 6484
8ac5a6d5 6485 assert_panel_unlocked(dev_priv, pipe);
652c393a 6486
dc257cf1 6487 dpll = I915_READ(dpll_reg);
652c393a
JB
6488 dpll |= DISPLAY_RATE_SELECT_FPA1;
6489 I915_WRITE(dpll_reg, dpll);
9d0498a2 6490 intel_wait_for_vblank(dev, pipe);
652c393a
JB
6491 dpll = I915_READ(dpll_reg);
6492 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 6493 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
6494 }
6495
6496}
6497
f047e395
CW
6498void intel_mark_busy(struct drm_device *dev)
6499{
f047e395
CW
6500 i915_update_gfx_val(dev->dev_private);
6501}
6502
6503void intel_mark_idle(struct drm_device *dev)
652c393a 6504{
f047e395
CW
6505}
6506
6507void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6508{
6509 struct drm_device *dev = obj->base.dev;
652c393a 6510 struct drm_crtc *crtc;
652c393a
JB
6511
6512 if (!i915_powersave)
6513 return;
6514
652c393a 6515 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652c393a
JB
6516 if (!crtc->fb)
6517 continue;
6518
f047e395
CW
6519 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6520 intel_increase_pllclock(crtc);
652c393a 6521 }
652c393a
JB
6522}
6523
f047e395 6524void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
652c393a 6525{
f047e395
CW
6526 struct drm_device *dev = obj->base.dev;
6527 struct drm_crtc *crtc;
652c393a 6528
f047e395 6529 if (!i915_powersave)
acb87dfb
CW
6530 return;
6531
652c393a
JB
6532 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6533 if (!crtc->fb)
6534 continue;
6535
f047e395
CW
6536 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6537 intel_decrease_pllclock(crtc);
652c393a
JB
6538 }
6539}
6540
79e53945
JB
6541static void intel_crtc_destroy(struct drm_crtc *crtc)
6542{
6543 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a
DV
6544 struct drm_device *dev = crtc->dev;
6545 struct intel_unpin_work *work;
6546 unsigned long flags;
6547
6548 spin_lock_irqsave(&dev->event_lock, flags);
6549 work = intel_crtc->unpin_work;
6550 intel_crtc->unpin_work = NULL;
6551 spin_unlock_irqrestore(&dev->event_lock, flags);
6552
6553 if (work) {
6554 cancel_work_sync(&work->work);
6555 kfree(work);
6556 }
79e53945
JB
6557
6558 drm_crtc_cleanup(crtc);
67e77c5a 6559
79e53945
JB
6560 kfree(intel_crtc);
6561}
6562
6b95a207
KH
6563static void intel_unpin_work_fn(struct work_struct *__work)
6564{
6565 struct intel_unpin_work *work =
6566 container_of(__work, struct intel_unpin_work, work);
b4a98e57 6567 struct drm_device *dev = work->crtc->dev;
6b95a207 6568
b4a98e57 6569 mutex_lock(&dev->struct_mutex);
1690e1eb 6570 intel_unpin_fb_obj(work->old_fb_obj);
05394f39
CW
6571 drm_gem_object_unreference(&work->pending_flip_obj->base);
6572 drm_gem_object_unreference(&work->old_fb_obj->base);
d9e86c0e 6573
b4a98e57
CW
6574 intel_update_fbc(dev);
6575 mutex_unlock(&dev->struct_mutex);
6576
6577 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
6578 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
6579
6b95a207
KH
6580 kfree(work);
6581}
6582
1afe3e9d 6583static void do_intel_finish_page_flip(struct drm_device *dev,
49b14a5c 6584 struct drm_crtc *crtc)
6b95a207
KH
6585{
6586 drm_i915_private_t *dev_priv = dev->dev_private;
6b95a207
KH
6587 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6588 struct intel_unpin_work *work;
05394f39 6589 struct drm_i915_gem_object *obj;
6b95a207
KH
6590 unsigned long flags;
6591
6592 /* Ignore early vblank irqs */
6593 if (intel_crtc == NULL)
6594 return;
6595
6596 spin_lock_irqsave(&dev->event_lock, flags);
6597 work = intel_crtc->unpin_work;
6598 if (work == NULL || !work->pending) {
6599 spin_unlock_irqrestore(&dev->event_lock, flags);
6600 return;
6601 }
6602
6603 intel_crtc->unpin_work = NULL;
6b95a207 6604
45a066eb
RC
6605 if (work->event)
6606 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
6b95a207 6607
0af7e4df
MK
6608 drm_vblank_put(dev, intel_crtc->pipe);
6609
6b95a207
KH
6610 spin_unlock_irqrestore(&dev->event_lock, flags);
6611
05394f39 6612 obj = work->old_fb_obj;
d9e86c0e 6613
5bb61643 6614 wake_up(&dev_priv->pending_flip_queue);
b4a98e57
CW
6615
6616 queue_work(dev_priv->wq, &work->work);
e5510fac
JB
6617
6618 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6b95a207
KH
6619}
6620
1afe3e9d
JB
6621void intel_finish_page_flip(struct drm_device *dev, int pipe)
6622{
6623 drm_i915_private_t *dev_priv = dev->dev_private;
6624 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6625
49b14a5c 6626 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6627}
6628
6629void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6630{
6631 drm_i915_private_t *dev_priv = dev->dev_private;
6632 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6633
49b14a5c 6634 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6635}
6636
6b95a207
KH
6637void intel_prepare_page_flip(struct drm_device *dev, int plane)
6638{
6639 drm_i915_private_t *dev_priv = dev->dev_private;
6640 struct intel_crtc *intel_crtc =
6641 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6642 unsigned long flags;
6643
6644 spin_lock_irqsave(&dev->event_lock, flags);
de3f440f 6645 if (intel_crtc->unpin_work) {
4e5359cd
SF
6646 if ((++intel_crtc->unpin_work->pending) > 1)
6647 DRM_ERROR("Prepared flip multiple times\n");
de3f440f
JB
6648 } else {
6649 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6650 }
6b95a207
KH
6651 spin_unlock_irqrestore(&dev->event_lock, flags);
6652}
6653
8c9f3aaf
JB
6654static int intel_gen2_queue_flip(struct drm_device *dev,
6655 struct drm_crtc *crtc,
6656 struct drm_framebuffer *fb,
6657 struct drm_i915_gem_object *obj)
6658{
6659 struct drm_i915_private *dev_priv = dev->dev_private;
6660 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 6661 u32 flip_mask;
6d90c952 6662 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6663 int ret;
6664
6d90c952 6665 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6666 if (ret)
83d4092b 6667 goto err;
8c9f3aaf 6668
6d90c952 6669 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6670 if (ret)
83d4092b 6671 goto err_unpin;
8c9f3aaf
JB
6672
6673 /* Can't queue multiple flips, so wait for the previous
6674 * one to finish before executing the next.
6675 */
6676 if (intel_crtc->plane)
6677 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6678 else
6679 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6680 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6681 intel_ring_emit(ring, MI_NOOP);
6682 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6683 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6684 intel_ring_emit(ring, fb->pitches[0]);
e506a0c6 6685 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6d90c952
DV
6686 intel_ring_emit(ring, 0); /* aux display base address, unused */
6687 intel_ring_advance(ring);
83d4092b
CW
6688 return 0;
6689
6690err_unpin:
6691 intel_unpin_fb_obj(obj);
6692err:
8c9f3aaf
JB
6693 return ret;
6694}
6695
6696static int intel_gen3_queue_flip(struct drm_device *dev,
6697 struct drm_crtc *crtc,
6698 struct drm_framebuffer *fb,
6699 struct drm_i915_gem_object *obj)
6700{
6701 struct drm_i915_private *dev_priv = dev->dev_private;
6702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 6703 u32 flip_mask;
6d90c952 6704 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6705 int ret;
6706
6d90c952 6707 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6708 if (ret)
83d4092b 6709 goto err;
8c9f3aaf 6710
6d90c952 6711 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6712 if (ret)
83d4092b 6713 goto err_unpin;
8c9f3aaf
JB
6714
6715 if (intel_crtc->plane)
6716 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6717 else
6718 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6719 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6720 intel_ring_emit(ring, MI_NOOP);
6721 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6722 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6723 intel_ring_emit(ring, fb->pitches[0]);
e506a0c6 6724 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6d90c952
DV
6725 intel_ring_emit(ring, MI_NOOP);
6726
6727 intel_ring_advance(ring);
83d4092b
CW
6728 return 0;
6729
6730err_unpin:
6731 intel_unpin_fb_obj(obj);
6732err:
8c9f3aaf
JB
6733 return ret;
6734}
6735
6736static int intel_gen4_queue_flip(struct drm_device *dev,
6737 struct drm_crtc *crtc,
6738 struct drm_framebuffer *fb,
6739 struct drm_i915_gem_object *obj)
6740{
6741 struct drm_i915_private *dev_priv = dev->dev_private;
6742 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6743 uint32_t pf, pipesrc;
6d90c952 6744 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6745 int ret;
6746
6d90c952 6747 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6748 if (ret)
83d4092b 6749 goto err;
8c9f3aaf 6750
6d90c952 6751 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6752 if (ret)
83d4092b 6753 goto err_unpin;
8c9f3aaf
JB
6754
6755 /* i965+ uses the linear or tiled offsets from the
6756 * Display Registers (which do not change across a page-flip)
6757 * so we need only reprogram the base address.
6758 */
6d90c952
DV
6759 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6760 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6761 intel_ring_emit(ring, fb->pitches[0]);
c2c75131
DV
6762 intel_ring_emit(ring,
6763 (obj->gtt_offset + intel_crtc->dspaddr_offset) |
6764 obj->tiling_mode);
8c9f3aaf
JB
6765
6766 /* XXX Enabling the panel-fitter across page-flip is so far
6767 * untested on non-native modes, so ignore it for now.
6768 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6769 */
6770 pf = 0;
6771 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6772 intel_ring_emit(ring, pf | pipesrc);
6773 intel_ring_advance(ring);
83d4092b
CW
6774 return 0;
6775
6776err_unpin:
6777 intel_unpin_fb_obj(obj);
6778err:
8c9f3aaf
JB
6779 return ret;
6780}
6781
6782static int intel_gen6_queue_flip(struct drm_device *dev,
6783 struct drm_crtc *crtc,
6784 struct drm_framebuffer *fb,
6785 struct drm_i915_gem_object *obj)
6786{
6787 struct drm_i915_private *dev_priv = dev->dev_private;
6788 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6d90c952 6789 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6790 uint32_t pf, pipesrc;
6791 int ret;
6792
6d90c952 6793 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6794 if (ret)
83d4092b 6795 goto err;
8c9f3aaf 6796
6d90c952 6797 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6798 if (ret)
83d4092b 6799 goto err_unpin;
8c9f3aaf 6800
6d90c952
DV
6801 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6802 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6803 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
c2c75131 6804 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
8c9f3aaf 6805
dc257cf1
DV
6806 /* Contrary to the suggestions in the documentation,
6807 * "Enable Panel Fitter" does not seem to be required when page
6808 * flipping with a non-native mode, and, worse, causes a normal
6809 * modeset to fail.
6810 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6811 */
6812 pf = 0;
8c9f3aaf 6813 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6814 intel_ring_emit(ring, pf | pipesrc);
6815 intel_ring_advance(ring);
83d4092b
CW
6816 return 0;
6817
6818err_unpin:
6819 intel_unpin_fb_obj(obj);
6820err:
8c9f3aaf
JB
6821 return ret;
6822}
6823
7c9017e5
JB
6824/*
6825 * On gen7 we currently use the blit ring because (in early silicon at least)
6826 * the render ring doesn't give us interrupts for page flip completion, which
6827 * means clients will hang after the first flip is queued. Fortunately the
6828 * blit ring generates interrupts properly, so use it instead.
6829 */
6830static int intel_gen7_queue_flip(struct drm_device *dev,
6831 struct drm_crtc *crtc,
6832 struct drm_framebuffer *fb,
6833 struct drm_i915_gem_object *obj)
6834{
6835 struct drm_i915_private *dev_priv = dev->dev_private;
6836 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6837 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
cb05d8de 6838 uint32_t plane_bit = 0;
7c9017e5
JB
6839 int ret;
6840
6841 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6842 if (ret)
83d4092b 6843 goto err;
7c9017e5 6844
cb05d8de
DV
6845 switch (intel_crtc->plane) {
6846 case PLANE_A:
6847 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
6848 break;
6849 case PLANE_B:
6850 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
6851 break;
6852 case PLANE_C:
6853 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
6854 break;
6855 default:
6856 WARN_ONCE(1, "unknown plane in flip command\n");
6857 ret = -ENODEV;
ab3951eb 6858 goto err_unpin;
cb05d8de
DV
6859 }
6860
7c9017e5
JB
6861 ret = intel_ring_begin(ring, 4);
6862 if (ret)
83d4092b 6863 goto err_unpin;
7c9017e5 6864
cb05d8de 6865 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
01f2c773 6866 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
c2c75131 6867 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7c9017e5
JB
6868 intel_ring_emit(ring, (MI_NOOP));
6869 intel_ring_advance(ring);
83d4092b
CW
6870 return 0;
6871
6872err_unpin:
6873 intel_unpin_fb_obj(obj);
6874err:
7c9017e5
JB
6875 return ret;
6876}
6877
8c9f3aaf
JB
6878static int intel_default_queue_flip(struct drm_device *dev,
6879 struct drm_crtc *crtc,
6880 struct drm_framebuffer *fb,
6881 struct drm_i915_gem_object *obj)
6882{
6883 return -ENODEV;
6884}
6885
6b95a207
KH
6886static int intel_crtc_page_flip(struct drm_crtc *crtc,
6887 struct drm_framebuffer *fb,
6888 struct drm_pending_vblank_event *event)
6889{
6890 struct drm_device *dev = crtc->dev;
6891 struct drm_i915_private *dev_priv = dev->dev_private;
6892 struct intel_framebuffer *intel_fb;
05394f39 6893 struct drm_i915_gem_object *obj;
6b95a207
KH
6894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6895 struct intel_unpin_work *work;
8c9f3aaf 6896 unsigned long flags;
52e68630 6897 int ret;
6b95a207 6898
e6a595d2
VS
6899 /* Can't change pixel format via MI display flips. */
6900 if (fb->pixel_format != crtc->fb->pixel_format)
6901 return -EINVAL;
6902
6903 /*
6904 * TILEOFF/LINOFF registers can't be changed via MI display flips.
6905 * Note that pitch changes could also affect these registers.
6906 */
6907 if (INTEL_INFO(dev)->gen > 3 &&
6908 (fb->offsets[0] != crtc->fb->offsets[0] ||
6909 fb->pitches[0] != crtc->fb->pitches[0]))
6910 return -EINVAL;
6911
6b95a207
KH
6912 work = kzalloc(sizeof *work, GFP_KERNEL);
6913 if (work == NULL)
6914 return -ENOMEM;
6915
6b95a207 6916 work->event = event;
b4a98e57 6917 work->crtc = crtc;
6b95a207 6918 intel_fb = to_intel_framebuffer(crtc->fb);
b1b87f6b 6919 work->old_fb_obj = intel_fb->obj;
6b95a207
KH
6920 INIT_WORK(&work->work, intel_unpin_work_fn);
6921
7317c75e
JB
6922 ret = drm_vblank_get(dev, intel_crtc->pipe);
6923 if (ret)
6924 goto free_work;
6925
6b95a207
KH
6926 /* We borrow the event spin lock for protecting unpin_work */
6927 spin_lock_irqsave(&dev->event_lock, flags);
6928 if (intel_crtc->unpin_work) {
6929 spin_unlock_irqrestore(&dev->event_lock, flags);
6930 kfree(work);
7317c75e 6931 drm_vblank_put(dev, intel_crtc->pipe);
468f0b44
CW
6932
6933 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
6934 return -EBUSY;
6935 }
6936 intel_crtc->unpin_work = work;
6937 spin_unlock_irqrestore(&dev->event_lock, flags);
6938
6939 intel_fb = to_intel_framebuffer(fb);
6940 obj = intel_fb->obj;
6941
b4a98e57
CW
6942 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
6943 flush_workqueue(dev_priv->wq);
6944
79158103
CW
6945 ret = i915_mutex_lock_interruptible(dev);
6946 if (ret)
6947 goto cleanup;
6b95a207 6948
75dfca80 6949 /* Reference the objects for the scheduled work. */
05394f39
CW
6950 drm_gem_object_reference(&work->old_fb_obj->base);
6951 drm_gem_object_reference(&obj->base);
6b95a207
KH
6952
6953 crtc->fb = fb;
96b099fd 6954
e1f99ce6 6955 work->pending_flip_obj = obj;
e1f99ce6 6956
4e5359cd
SF
6957 work->enable_stall_check = true;
6958
b4a98e57 6959 atomic_inc(&intel_crtc->unpin_work_count);
e1f99ce6 6960
8c9f3aaf
JB
6961 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6962 if (ret)
6963 goto cleanup_pending;
6b95a207 6964
7782de3b 6965 intel_disable_fbc(dev);
f047e395 6966 intel_mark_fb_busy(obj);
6b95a207
KH
6967 mutex_unlock(&dev->struct_mutex);
6968
e5510fac
JB
6969 trace_i915_flip_request(intel_crtc->plane, obj);
6970
6b95a207 6971 return 0;
96b099fd 6972
8c9f3aaf 6973cleanup_pending:
b4a98e57 6974 atomic_dec(&intel_crtc->unpin_work_count);
05394f39
CW
6975 drm_gem_object_unreference(&work->old_fb_obj->base);
6976 drm_gem_object_unreference(&obj->base);
96b099fd
CW
6977 mutex_unlock(&dev->struct_mutex);
6978
79158103 6979cleanup:
96b099fd
CW
6980 spin_lock_irqsave(&dev->event_lock, flags);
6981 intel_crtc->unpin_work = NULL;
6982 spin_unlock_irqrestore(&dev->event_lock, flags);
6983
7317c75e
JB
6984 drm_vblank_put(dev, intel_crtc->pipe);
6985free_work:
96b099fd
CW
6986 kfree(work);
6987
6988 return ret;
6b95a207
KH
6989}
6990
f6e5b160 6991static struct drm_crtc_helper_funcs intel_helper_funcs = {
f6e5b160
CW
6992 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6993 .load_lut = intel_crtc_load_lut,
976f8a20 6994 .disable = intel_crtc_noop,
f6e5b160
CW
6995};
6996
6ed0f796 6997bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
47f1c6c9 6998{
6ed0f796
DV
6999 struct intel_encoder *other_encoder;
7000 struct drm_crtc *crtc = &encoder->new_crtc->base;
47f1c6c9 7001
6ed0f796
DV
7002 if (WARN_ON(!crtc))
7003 return false;
7004
7005 list_for_each_entry(other_encoder,
7006 &crtc->dev->mode_config.encoder_list,
7007 base.head) {
7008
7009 if (&other_encoder->new_crtc->base != crtc ||
7010 encoder == other_encoder)
7011 continue;
7012 else
7013 return true;
f47166d2
CW
7014 }
7015
6ed0f796
DV
7016 return false;
7017}
47f1c6c9 7018
50f56119
DV
7019static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7020 struct drm_crtc *crtc)
7021{
7022 struct drm_device *dev;
7023 struct drm_crtc *tmp;
7024 int crtc_mask = 1;
47f1c6c9 7025
50f56119 7026 WARN(!crtc, "checking null crtc?\n");
47f1c6c9 7027
50f56119 7028 dev = crtc->dev;
47f1c6c9 7029
50f56119
DV
7030 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
7031 if (tmp == crtc)
7032 break;
7033 crtc_mask <<= 1;
7034 }
47f1c6c9 7035
50f56119
DV
7036 if (encoder->possible_crtcs & crtc_mask)
7037 return true;
7038 return false;
47f1c6c9 7039}
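/*
 * Usage sketch (illustrative): a crtc's position in crtc_list defines its
 * bit, so with pipes A/B/C registered in order the second crtc has
 * crtc_mask == 0x2 and is acceptable only if that bit is set in
 * encoder->possible_crtcs:
 *
 *	if (!intel_encoder_crtc_ok(&encoder->base, &crtc->base))
 *		DRM_DEBUG_KMS("encoder can't drive requested crtc\n");
 */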
79e53945 7040
9a935856
DV
7041/**
7042 * intel_modeset_update_staged_output_state
7043 *
7044 * Updates the staged output configuration state, e.g. after we've read out the
7045 * current hw state.
7046 */
7047static void intel_modeset_update_staged_output_state(struct drm_device *dev)
f6e5b160 7048{
9a935856
DV
7049 struct intel_encoder *encoder;
7050 struct intel_connector *connector;
f6e5b160 7051
9a935856
DV
7052 list_for_each_entry(connector, &dev->mode_config.connector_list,
7053 base.head) {
7054 connector->new_encoder =
7055 to_intel_encoder(connector->base.encoder);
7056 }
f6e5b160 7057
9a935856
DV
7058 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7059 base.head) {
7060 encoder->new_crtc =
7061 to_intel_crtc(encoder->base.crtc);
7062 }
f6e5b160
CW
7063}
7064
9a935856
DV
7065/**
7066 * intel_modeset_commit_output_state
7067 *
7068 * This function copies the staged display pipe configuration to the real one.
7069 */
7070static void intel_modeset_commit_output_state(struct drm_device *dev)
7071{
7072 struct intel_encoder *encoder;
7073 struct intel_connector *connector;
f6e5b160 7074
9a935856
DV
7075 list_for_each_entry(connector, &dev->mode_config.connector_list,
7076 base.head) {
7077 connector->base.encoder = &connector->new_encoder->base;
7078 }
f6e5b160 7079
9a935856
DV
7080 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7081 base.head) {
7082 encoder->base.crtc = &encoder->new_crtc->base;
7083 }
7084}
7085
7758a113
DV
7086static struct drm_display_mode *
7087intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7088 struct drm_display_mode *mode)
ee7b9f93 7089{
7758a113
DV
7090 struct drm_device *dev = crtc->dev;
7091 struct drm_display_mode *adjusted_mode;
7092 struct drm_encoder_helper_funcs *encoder_funcs;
7093 struct intel_encoder *encoder;
ee7b9f93 7094
7758a113
DV
7095 adjusted_mode = drm_mode_duplicate(dev, mode);
7096 if (!adjusted_mode)
7097 return ERR_PTR(-ENOMEM);
7098
7099 /* Pass our mode to the connectors and the CRTC to give them a chance to
7100 * adjust it according to limitations or connector properties, and also
7101 * a chance to reject the mode entirely.
47f1c6c9 7102 */
7758a113
DV
7103 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7104 base.head) {
47f1c6c9 7105
7758a113
DV
7106 if (&encoder->new_crtc->base != crtc)
7107 continue;
7108 encoder_funcs = encoder->base.helper_private;
7109 if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
7110 adjusted_mode))) {
7111 DRM_DEBUG_KMS("Encoder fixup failed\n");
7112 goto fail;
7113 }
ee7b9f93 7114 }
47f1c6c9 7115
7758a113
DV
7116 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
7117 DRM_DEBUG_KMS("CRTC fixup failed\n");
7118 goto fail;
ee7b9f93 7119 }
7758a113 7120 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
47f1c6c9 7121
7758a113
DV
7122 return adjusted_mode;
7123fail:
7124 drm_mode_destroy(dev, adjusted_mode);
7125 return ERR_PTR(-EINVAL);
ee7b9f93 7126}
47f1c6c9 7127
e2e1ed41
DV
7128/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7129 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7130static void
7131intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7132 unsigned *prepare_pipes, unsigned *disable_pipes)
79e53945
JB
7133{
7134 struct intel_crtc *intel_crtc;
e2e1ed41
DV
7135 struct drm_device *dev = crtc->dev;
7136 struct intel_encoder *encoder;
7137 struct intel_connector *connector;
7138 struct drm_crtc *tmp_crtc;
79e53945 7139
e2e1ed41 7140 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
79e53945 7141
e2e1ed41
DV
7142 /* Check which crtcs have changed outputs connected to them; these need
7143 * to be part of the prepare_pipes mask. We don't (yet) support global
7144 * modeset across multiple crtcs, so modeset_pipes will only have one
7145 * bit set at most. */
7146 list_for_each_entry(connector, &dev->mode_config.connector_list,
7147 base.head) {
7148 if (connector->base.encoder == &connector->new_encoder->base)
7149 continue;
79e53945 7150
e2e1ed41
DV
7151 if (connector->base.encoder) {
7152 tmp_crtc = connector->base.encoder->crtc;
7153
7154 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7155 }
7156
7157 if (connector->new_encoder)
7158 *prepare_pipes |=
7159 1 << connector->new_encoder->new_crtc->pipe;
79e53945
JB
7160 }
7161
e2e1ed41
DV
7162 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7163 base.head) {
7164 if (encoder->base.crtc == &encoder->new_crtc->base)
7165 continue;
7166
7167 if (encoder->base.crtc) {
7168 tmp_crtc = encoder->base.crtc;
7169
7170 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7171 }
7172
7173 if (encoder->new_crtc)
7174 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
80824003
JB
7175 }
7176
e2e1ed41
DV
7177 /* Check for any pipes that will be fully disabled ... */
7178 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7179 base.head) {
7180 bool used = false;
22fd0fab 7181
e2e1ed41
DV
7182 /* Don't try to disable disabled crtcs. */
7183 if (!intel_crtc->base.enabled)
7184 continue;
7e7d76c3 7185
e2e1ed41
DV
7186 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7187 base.head) {
7188 if (encoder->new_crtc == intel_crtc)
7189 used = true;
7190 }
7191
7192 if (!used)
7193 *disable_pipes |= 1 << intel_crtc->pipe;
7e7d76c3
JB
7194 }
7195
e2e1ed41
DV
7196
7197 /* set_mode is also used to update properties on live display pipes. */
7198 intel_crtc = to_intel_crtc(crtc);
7199 if (crtc->enabled)
7200 *prepare_pipes |= 1 << intel_crtc->pipe;
7201
7202 /* We only support modeset on one single crtc, hence we need to do that
7203 * only for the passed-in crtc iff we change anything other than just
7204 * disabling crtcs.
7205 *
7206 * This is actually not true; to be fully compatible with the old crtc
7207 * helper we automatically disable _any_ output (i.e. it doesn't need to be
7208 * connected to the crtc we're modesetting on) if it's disconnected.
7209 * Which is a rather nutty api (since changing the output configuration
7210 * without userspace's explicit request can lead to confusion), but
7211 * alas. Hence we currently need to modeset on all pipes we prepare. */
7212 if (*prepare_pipes)
7213 *modeset_pipes = *prepare_pipes;
7214
7215 /* ... and mask these out. */
7216 *modeset_pipes &= ~(*disable_pipes);
7217 *prepare_pipes &= ~(*disable_pipes);
47f1c6c9 7218}
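/*
 * Mask semantics sketch (illustrative): moving a connector from pipe A to
 * pipe B marks both pipes in prepare_pipes; if pipe A keeps another output,
 * disable_pipes stays 0 and modeset_pipes == prepare_pipes ==
 * (1 << PIPE_A) | (1 << PIPE_B). Any pipe left without an encoder ends up
 * in disable_pipes and is cleared from the other two masks.
 */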
79e53945 7219
ea9d758d 7220static bool intel_crtc_in_use(struct drm_crtc *crtc)
f6e5b160 7221{
ea9d758d 7222 struct drm_encoder *encoder;
f6e5b160 7223 struct drm_device *dev = crtc->dev;
f6e5b160 7224
ea9d758d
DV
7225 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
7226 if (encoder->crtc == crtc)
7227 return true;
7228
7229 return false;
7230}
7231
7232static void
7233intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7234{
7235 struct intel_encoder *intel_encoder;
7236 struct intel_crtc *intel_crtc;
7237 struct drm_connector *connector;
7238
7239 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
7240 base.head) {
7241 if (!intel_encoder->base.crtc)
7242 continue;
7243
7244 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
7245
7246 if (prepare_pipes & (1 << intel_crtc->pipe))
7247 intel_encoder->connectors_active = false;
7248 }
7249
7250 intel_modeset_commit_output_state(dev);
7251
7252 /* Update computed state. */
7253 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7254 base.head) {
7255 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
7256 }
7257
7258 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7259 if (!connector->encoder || !connector->encoder->crtc)
7260 continue;
7261
7262 intel_crtc = to_intel_crtc(connector->encoder->crtc);
7263
7264 if (prepare_pipes & (1 << intel_crtc->pipe)) {
68d34720
DV
7265 struct drm_property *dpms_property =
7266 dev->mode_config.dpms_property;
7267
ea9d758d 7268 connector->dpms = DRM_MODE_DPMS_ON;
662595df 7269 drm_object_property_set_value(&connector->base,
68d34720
DV
7270 dpms_property,
7271 DRM_MODE_DPMS_ON);
ea9d758d
DV
7272
7273 intel_encoder = to_intel_encoder(connector->encoder);
7274 intel_encoder->connectors_active = true;
7275 }
7276 }
7277
7278}
7279
25c5b266
DV
7280#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
7281 list_for_each_entry((intel_crtc), \
7282 &(dev)->mode_config.crtc_list, \
7283 base.head) \
7284 if (mask & (1 << (intel_crtc)->pipe)) \
7285
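/*
 * Usage sketch (illustrative): walk only the crtcs selected by a pipe mask,
 * as done below for the pipes being shut down:
 *
 *	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 *		intel_crtc_disable(&intel_crtc->base);
 */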
b980514c 7286void
8af6cf88
DV
7287intel_modeset_check_state(struct drm_device *dev)
7288{
7289 struct intel_crtc *crtc;
7290 struct intel_encoder *encoder;
7291 struct intel_connector *connector;
7292
7293 list_for_each_entry(connector, &dev->mode_config.connector_list,
7294 base.head) {
7295 /* This also checks the encoder/connector hw state with the
7296 * ->get_hw_state callbacks. */
7297 intel_connector_check_state(connector);
7298
7299 WARN(&connector->new_encoder->base != connector->base.encoder,
7300 "connector's staged encoder doesn't match current encoder\n");
7301 }
7302
7303 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7304 base.head) {
7305 bool enabled = false;
7306 bool active = false;
7307 enum pipe pipe, tracked_pipe;
7308
7309 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7310 encoder->base.base.id,
7311 drm_get_encoder_name(&encoder->base));
7312
7313 WARN(&encoder->new_crtc->base != encoder->base.crtc,
7314 "encoder's stage crtc doesn't match current crtc\n");
7315 WARN(encoder->connectors_active && !encoder->base.crtc,
7316 "encoder's active_connectors set, but no crtc\n");
7317
7318 list_for_each_entry(connector, &dev->mode_config.connector_list,
7319 base.head) {
7320 if (connector->base.encoder != &encoder->base)
7321 continue;
7322 enabled = true;
7323 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
7324 active = true;
7325 }
7326 WARN(!!encoder->base.crtc != enabled,
7327 "encoder's enabled state mismatch "
7328 "(expected %i, found %i)\n",
7329 !!encoder->base.crtc, enabled);
7330 WARN(active && !encoder->base.crtc,
7331 "active encoder with no crtc\n");
7332
7333 WARN(encoder->connectors_active != active,
7334 "encoder's computed active state doesn't match tracked active state "
7335 "(expected %i, found %i)\n", active, encoder->connectors_active);
7336
7337 active = encoder->get_hw_state(encoder, &pipe);
7338 WARN(active != encoder->connectors_active,
7339 "encoder's hw state doesn't match sw tracking "
7340 "(expected %i, found %i)\n",
7341 encoder->connectors_active, active);
7342
7343 if (!encoder->base.crtc)
7344 continue;
7345
7346 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
7347 WARN(active && pipe != tracked_pipe,
7348 "active encoder's pipe doesn't match"
7349 "(expected %i, found %i)\n",
7350 tracked_pipe, pipe);
7351
7352 }
7353
7354 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7355 base.head) {
7356 bool enabled = false;
7357 bool active = false;
7358
7359 DRM_DEBUG_KMS("[CRTC:%d]\n",
7360 crtc->base.base.id);
7361
7362 WARN(crtc->active && !crtc->base.enabled,
7363 "active crtc, but not enabled in sw tracking\n");
7364
7365 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7366 base.head) {
7367 if (encoder->base.crtc != &crtc->base)
7368 continue;
7369 enabled = true;
7370 if (encoder->connectors_active)
7371 active = true;
7372 }
7373 WARN(active != crtc->active,
7374 "crtc's computed active state doesn't match tracked active state "
7375 "(expected %i, found %i)\n", active, crtc->active);
7376 WARN(enabled != crtc->base.enabled,
7377 "crtc's computed enabled state doesn't match tracked enabled state "
7378 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7379
7380 assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
7381 }
7382}
7383
a6778b3c
DV
7384bool intel_set_mode(struct drm_crtc *crtc,
7385 struct drm_display_mode *mode,
94352cf9 7386 int x, int y, struct drm_framebuffer *fb)
a6778b3c
DV
7387{
7388 struct drm_device *dev = crtc->dev;
dbf2b54e 7389 drm_i915_private_t *dev_priv = dev->dev_private;
3ac18232 7390 struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
25c5b266
DV
7391 struct intel_crtc *intel_crtc;
7392 unsigned disable_pipes, prepare_pipes, modeset_pipes;
a6778b3c
DV
7393 bool ret = true;
7394
3ac18232
TG
7395 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
7396 if (!saved_mode) {
7397 DRM_ERROR("i915: Could not allocate saved display mode.\n");
7398 return false;
7399 }
7400 saved_hwmode = saved_mode + 1;
7401
e2e1ed41 7402 intel_modeset_affected_pipes(crtc, &modeset_pipes,
25c5b266
DV
7403 &prepare_pipes, &disable_pipes);
7404
7405 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7406 modeset_pipes, prepare_pipes, disable_pipes);
e2e1ed41 7407
976f8a20
DV
7408 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7409 intel_crtc_disable(&intel_crtc->base);
87f1faa6 7410
3ac18232
TG
7411 *saved_hwmode = crtc->hwmode;
7412 *saved_mode = crtc->mode;
a6778b3c 7413
25c5b266
DV
7414 /* Hack: Because we don't (yet) support global modeset on multiple
7415 * crtcs, we don't keep track of the new mode for more than one crtc.
7416 * Hence simply check whether any bit is set in modeset_pipes in all the
7417 * pieces of code that are not yet converted to deal with multiple crtcs
7418 * changing their mode at the same time. */
7419 adjusted_mode = NULL;
7420 if (modeset_pipes) {
7421 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7422 if (IS_ERR(adjusted_mode)) {
3ac18232
TG
7423 ret = false;
7424 goto out;
25c5b266 7425 }
25c5b266 7426 }
a6778b3c 7427
ea9d758d
DV
7428 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7429 if (intel_crtc->base.enabled)
7430 dev_priv->display.crtc_disable(&intel_crtc->base);
7431 }
a6778b3c 7432
6c4c86f5
DV
7433 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7434 * to set it here already despite that we pass it down the callchain.
f6e5b160 7435 */
6c4c86f5 7436 if (modeset_pipes)
25c5b266 7437 crtc->mode = *mode;
7758a113 7438
ea9d758d
DV
7439 /* Only after disabling all output pipelines that will be changed can we
7440 * update the output configuration. */
7441 intel_modeset_update_state(dev, prepare_pipes);
f6e5b160 7442
47fab737
DV
7443 if (dev_priv->display.modeset_global_resources)
7444 dev_priv->display.modeset_global_resources(dev);
7445
a6778b3c
DV
7446 /* Set up the DPLL and any encoders state that needs to adjust or depend
7447 * on the DPLL.
f6e5b160 7448 */
25c5b266
DV
7449 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7450 ret = !intel_crtc_mode_set(&intel_crtc->base,
7451 mode, adjusted_mode,
7452 x, y, fb);
7453 if (!ret)
7454 goto done;
a6778b3c
DV
7455 }
7456
7457 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
25c5b266
DV
7458 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7459 dev_priv->display.crtc_enable(&intel_crtc->base);
a6778b3c 7460
25c5b266
DV
7461 if (modeset_pipes) {
7462 /* Store real post-adjustment hardware mode. */
7463 crtc->hwmode = *adjusted_mode;
a6778b3c 7464
25c5b266
DV
7465 /* Calculate and store various constants which
7466 * are later needed by vblank and swap-completion
7467 * timestamping. They are derived from true hwmode.
7468 */
7469 drm_calc_timestamping_constants(crtc);
7470 }
a6778b3c
DV
7471
7472 /* FIXME: add subpixel order */
7473done:
7474 drm_mode_destroy(dev, adjusted_mode);
25c5b266 7475 if (!ret && crtc->enabled) {
3ac18232
TG
7476 crtc->hwmode = *saved_hwmode;
7477 crtc->mode = *saved_mode;
8af6cf88
DV
7478 } else {
7479 intel_modeset_check_state(dev);
a6778b3c
DV
7480 }
7481
3ac18232
TG
7482out:
7483 kfree(saved_mode);
a6778b3c 7484 return ret;
f6e5b160
CW
7485}
7486
25c5b266
DV
7487#undef for_each_intel_crtc_masked
7488
d9e55608
DV
7489static void intel_set_config_free(struct intel_set_config *config)
7490{
7491 if (!config)
7492 return;
7493
1aa4b628
DV
7494 kfree(config->save_connector_encoders);
7495 kfree(config->save_encoder_crtcs);
d9e55608
DV
7496 kfree(config);
7497}
7498
85f9eb71
DV
7499static int intel_set_config_save_state(struct drm_device *dev,
7500 struct intel_set_config *config)
7501{
85f9eb71
DV
7502 struct drm_encoder *encoder;
7503 struct drm_connector *connector;
7504 int count;
7505
1aa4b628
DV
7506 config->save_encoder_crtcs =
7507 kcalloc(dev->mode_config.num_encoder,
7508 sizeof(struct drm_crtc *), GFP_KERNEL);
7509 if (!config->save_encoder_crtcs)
85f9eb71
DV
7510 return -ENOMEM;
7511
1aa4b628
DV
7512 config->save_connector_encoders =
7513 kcalloc(dev->mode_config.num_connector,
7514 sizeof(struct drm_encoder *), GFP_KERNEL);
7515 if (!config->save_connector_encoders)
85f9eb71
DV
7516 return -ENOMEM;
7517
7518 /* Copy data. Note that driver private data is not affected.
7519 * Should anything bad happen, only the expected state is
7520 * restored, not the driver's personal bookkeeping.
7521 */
85f9eb71
DV
7522 count = 0;
7523 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1aa4b628 7524 config->save_encoder_crtcs[count++] = encoder->crtc;
85f9eb71
DV
7525 }
7526
7527 count = 0;
7528 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1aa4b628 7529 config->save_connector_encoders[count++] = connector->encoder;
85f9eb71
DV
7530 }
7531
7532 return 0;
7533}
7534
7535static void intel_set_config_restore_state(struct drm_device *dev,
7536 struct intel_set_config *config)
7537{
9a935856
DV
7538 struct intel_encoder *encoder;
7539 struct intel_connector *connector;
85f9eb71
DV
7540 int count;
7541
85f9eb71 7542 count = 0;
9a935856
DV
7543 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7544 encoder->new_crtc =
7545 to_intel_crtc(config->save_encoder_crtcs[count++]);
85f9eb71
DV
7546 }
7547
7548 count = 0;
9a935856
DV
7549 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
7550 connector->new_encoder =
7551 to_intel_encoder(config->save_connector_encoders[count++]);
85f9eb71
DV
7552 }
7553}
7554
5e2b584e
DV
7555static void
7556intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7557 struct intel_set_config *config)
7558{
7559
7560 /* We should be able to check here if the fb has the same properties
7561 * and then just flip_or_move it */
7562 if (set->crtc->fb != set->fb) {
7563 /* If we have no fb then treat it as a full mode set */
7564 if (set->crtc->fb == NULL) {
7565 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
7566 config->mode_changed = true;
7567 } else if (set->fb == NULL) {
7568 config->mode_changed = true;
7569 } else if (set->fb->depth != set->crtc->fb->depth) {
7570 config->mode_changed = true;
7571 } else if (set->fb->bits_per_pixel !=
7572 set->crtc->fb->bits_per_pixel) {
7573 config->mode_changed = true;
7574 } else
7575 config->fb_changed = true;
7576 }
7577
835c5873 7578 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
5e2b584e
DV
7579 config->fb_changed = true;
7580
7581 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
7582 DRM_DEBUG_KMS("modes are different, full mode set\n");
7583 drm_mode_debug_printmodeline(&set->crtc->mode);
7584 drm_mode_debug_printmodeline(set->mode);
7585 config->mode_changed = true;
7586 }
7587}
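/*
 * Decision summary (illustrative): a missing fb, a depth/bpp mismatch or a
 * different mode sets config->mode_changed and forces a full
 * intel_set_mode(); merely panning (new x/y) or swapping in a compatible fb
 * only sets config->fb_changed, which takes the lighter
 * intel_pipe_set_base() path in intel_crtc_set_config().
 */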
7588
2e431051 7589static int
9a935856
DV
7590intel_modeset_stage_output_state(struct drm_device *dev,
7591 struct drm_mode_set *set,
7592 struct intel_set_config *config)
50f56119 7593{
85f9eb71 7594 struct drm_crtc *new_crtc;
9a935856
DV
7595 struct intel_connector *connector;
7596 struct intel_encoder *encoder;
2e431051 7597 int count, ro;
50f56119 7598
9a935856
DV
7599 /* The upper layers ensure that we either disable a crtc or have a list
7600 * of connectors. For paranoia, double-check this. */
7601 WARN_ON(!set->fb && (set->num_connectors != 0));
7602 WARN_ON(set->fb && (set->num_connectors == 0));
7603
50f56119 7604 count = 0;
9a935856
DV
7605 list_for_each_entry(connector, &dev->mode_config.connector_list,
7606 base.head) {
7607 /* Otherwise traverse passed in connector list and get encoders
7608 * for them. */
50f56119 7609 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856
DV
7610 if (set->connectors[ro] == &connector->base) {
7611 connector->new_encoder = connector->encoder;
50f56119
DV
7612 break;
7613 }
7614 }
7615
9a935856
DV
7616 /* If we disable the crtc, disable all its connectors. Also, if
7617 * the connector is on the changing crtc but not on the new
7618 * connector list, disable it. */
7619 if ((!set->fb || ro == set->num_connectors) &&
7620 connector->base.encoder &&
7621 connector->base.encoder->crtc == set->crtc) {
7622 connector->new_encoder = NULL;
7623
7624 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
7625 connector->base.base.id,
7626 drm_get_connector_name(&connector->base));
7627 }
7628
7629
7630 if (&connector->new_encoder->base != connector->base.encoder) {
50f56119 7631 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
5e2b584e 7632 config->mode_changed = true;
50f56119 7633 }
9a935856
DV
7634
7635 /* Disable all disconnected encoders. */
7636 if (connector->base.status == connector_status_disconnected)
7637 connector->new_encoder = NULL;
50f56119 7638 }
9a935856 7639 /* connector->new_encoder is now updated for all connectors. */
50f56119 7640
9a935856 7641 /* Update crtc of enabled connectors. */
50f56119 7642 count = 0;
9a935856
DV
7643 list_for_each_entry(connector, &dev->mode_config.connector_list,
7644 base.head) {
7645 if (!connector->new_encoder)
50f56119
DV
7646 continue;
7647
9a935856 7648 new_crtc = connector->new_encoder->base.crtc;
50f56119
DV
7649
7650 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856 7651 if (set->connectors[ro] == &connector->base)
50f56119
DV
7652 new_crtc = set->crtc;
7653 }
7654
7655 /* Make sure the new CRTC will work with the encoder */
9a935856
DV
7656 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
7657 new_crtc)) {
5e2b584e 7658 return -EINVAL;
50f56119 7659 }
9a935856
DV
7660 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
7661
7662 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
7663 connector->base.base.id,
7664 drm_get_connector_name(&connector->base),
7665 new_crtc->base.id);
7666 }
7667
7668 /* Check for any encoders that need to be disabled. */
7669 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7670 base.head) {
7671 list_for_each_entry(connector,
7672 &dev->mode_config.connector_list,
7673 base.head) {
7674 if (connector->new_encoder == encoder) {
7675 WARN_ON(!connector->new_encoder->new_crtc);
7676
7677 goto next_encoder;
7678 }
7679 }
7680 encoder->new_crtc = NULL;
7681next_encoder:
7682 /* Only now check for crtc changes so we don't miss encoders
7683 * that will be disabled. */
7684 if (&encoder->new_crtc->base != encoder->base.crtc) {
50f56119 7685 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
5e2b584e 7686 config->mode_changed = true;
50f56119
DV
7687 }
7688 }
9a935856 7689 /* Now we've also updated encoder->new_crtc for all encoders. */
50f56119 7690
2e431051
DV
7691 return 0;
7692}
7693
7694static int intel_crtc_set_config(struct drm_mode_set *set)
7695{
7696 struct drm_device *dev;
2e431051
DV
7697 struct drm_mode_set save_set;
7698 struct intel_set_config *config;
7699 int ret;
2e431051 7700
8d3e375e
DV
7701 BUG_ON(!set);
7702 BUG_ON(!set->crtc);
7703 BUG_ON(!set->crtc->helper_private);
2e431051
DV
7704
7705 if (!set->mode)
7706 set->fb = NULL;
7707
431e50f7
DV
7708 /* The fb helper likes to play gross jokes with ->mode_set_config.
7709 * Unfortunately the crtc helper doesn't do much at all for this case,
7710 * so we have to cope with this madness until the fb helper is fixed up. */
7711 if (set->fb && set->num_connectors == 0)
7712 return 0;
7713
2e431051
DV
7714 if (set->fb) {
7715 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
7716 set->crtc->base.id, set->fb->base.id,
7717 (int)set->num_connectors, set->x, set->y);
7718 } else {
7719 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
2e431051
DV
7720 }
7721
7722 dev = set->crtc->dev;
7723
7724 ret = -ENOMEM;
7725 config = kzalloc(sizeof(*config), GFP_KERNEL);
7726 if (!config)
7727 goto out_config;
7728
7729 ret = intel_set_config_save_state(dev, config);
7730 if (ret)
7731 goto out_config;
7732
7733 save_set.crtc = set->crtc;
7734 save_set.mode = &set->crtc->mode;
7735 save_set.x = set->crtc->x;
7736 save_set.y = set->crtc->y;
7737 save_set.fb = set->crtc->fb;
7738
7739 /* Compute whether we need a full modeset, only an fb base update or no
7740 * change at all. In the future we might also check whether only the
7741 * mode changed, e.g. for LVDS where we only change the panel fitter in
7742 * such cases. */
7743 intel_set_config_compute_mode_changes(set, config);
7744
9a935856 7745 ret = intel_modeset_stage_output_state(dev, set, config);
2e431051
DV
7746 if (ret)
7747 goto fail;
7748
5e2b584e 7749 if (config->mode_changed) {
87f1faa6 7750 if (set->mode) {
50f56119
DV
7751 DRM_DEBUG_KMS("attempting to set mode from"
7752 " userspace\n");
7753 drm_mode_debug_printmodeline(set->mode);
87f1faa6
DV
7754 }
7755
7756 if (!intel_set_mode(set->crtc, set->mode,
7757 set->x, set->y, set->fb)) {
7758 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
7759 set->crtc->base.id);
7760 ret = -EINVAL;
7761 goto fail;
7762 }
5e2b584e 7763 } else if (config->fb_changed) {
4f660f49 7764 ret = intel_pipe_set_base(set->crtc,
94352cf9 7765 set->x, set->y, set->fb);
50f56119
DV
7766 }
7767
d9e55608
DV
7768 intel_set_config_free(config);
7769
50f56119
DV
7770 return 0;
7771
7772fail:
85f9eb71 7773 intel_set_config_restore_state(dev, config);
50f56119
DV
7774
7775 /* Try to restore the config */
5e2b584e 7776 if (config->mode_changed &&
a6778b3c
DV
7777 !intel_set_mode(save_set.crtc, save_set.mode,
7778 save_set.x, save_set.y, save_set.fb))
50f56119
DV
7779 DRM_ERROR("failed to restore config after modeset failure\n");
7780
d9e55608
DV
7781out_config:
7782 intel_set_config_free(config);
50f56119
DV
7783 return ret;
7784}
f6e5b160
CW
7785
7786static const struct drm_crtc_funcs intel_crtc_funcs = {
f6e5b160
CW
7787 .cursor_set = intel_crtc_cursor_set,
7788 .cursor_move = intel_crtc_cursor_move,
7789 .gamma_set = intel_crtc_gamma_set,
50f56119 7790 .set_config = intel_crtc_set_config,
f6e5b160
CW
7791 .destroy = intel_crtc_destroy,
7792 .page_flip = intel_crtc_page_flip,
7793};
7794
79f689aa
PZ
7795static void intel_cpu_pll_init(struct drm_device *dev)
7796{
affa9354 7797 if (HAS_DDI(dev))
79f689aa
PZ
7798 intel_ddi_pll_init(dev);
7799}
7800
ee7b9f93
JB
7801static void intel_pch_pll_init(struct drm_device *dev)
7802{
7803 drm_i915_private_t *dev_priv = dev->dev_private;
7804 int i;
7805
7806 if (dev_priv->num_pch_pll == 0) {
7807 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
7808 return;
7809 }
7810
7811 for (i = 0; i < dev_priv->num_pch_pll; i++) {
7812 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
7813 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
7814 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
7815 }
7816}
7817
b358d0a6 7818static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 7819{
22fd0fab 7820 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
7821 struct intel_crtc *intel_crtc;
7822 int i;
7823
7824 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
7825 if (intel_crtc == NULL)
7826 return;
7827
7828 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
7829
7830 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
7831 for (i = 0; i < 256; i++) {
7832 intel_crtc->lut_r[i] = i;
7833 intel_crtc->lut_g[i] = i;
7834 intel_crtc->lut_b[i] = i;
7835 }
7836
80824003
JB
7837 /* Swap pipes & planes for FBC on pre-965 */
7838 intel_crtc->pipe = pipe;
7839 intel_crtc->plane = pipe;
a5c961d1 7840 intel_crtc->cpu_transcoder = pipe;
e2e767ab 7841 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
28c97730 7842 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 7843 intel_crtc->plane = !pipe;
80824003
JB
7844 }
7845
22fd0fab
JB
7846 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
7847 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
7848 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7849 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7850
5a354204 7851 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7e7d76c3 7852
79e53945 7853 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
79e53945
JB
7854}
7855
08d7b3d1 7856int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 7857 struct drm_file *file)
08d7b3d1 7858{
08d7b3d1 7859 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
7860 struct drm_mode_object *drmmode_obj;
7861 struct intel_crtc *crtc;
08d7b3d1 7862
1cff8f6b
DV
7863 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7864 return -ENODEV;
08d7b3d1 7865
c05422d5
DV
7866 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7867 DRM_MODE_OBJECT_CRTC);
08d7b3d1 7868
c05422d5 7869 if (!drmmode_obj) {
08d7b3d1
CW
7870 DRM_ERROR("no such CRTC id\n");
7871 return -EINVAL;
7872 }
7873
c05422d5
DV
7874 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7875 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 7876
c05422d5 7877 return 0;
08d7b3d1
CW
7878}
7879
66a9278e 7880static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 7881{
66a9278e
DV
7882 struct drm_device *dev = encoder->base.dev;
7883 struct intel_encoder *source_encoder;
79e53945 7884 int index_mask = 0;
79e53945
JB
7885 int entry = 0;
7886
66a9278e
DV
7887 list_for_each_entry(source_encoder,
7888 &dev->mode_config.encoder_list, base.head) {
7889
7890 if (encoder == source_encoder)
79e53945 7891 index_mask |= (1 << entry);
66a9278e
DV
7892
7893 /* Intel hw has only one MUX where encoders could be cloned. */
7894 if (encoder->cloneable && source_encoder->cloneable)
7895 index_mask |= (1 << entry);
7896
79e53945
JB
7897 entry++;
7898 }
4ef69c7a 7899
79e53945
JB
7900 return index_mask;
7901}
7902
4d302442
CW
7903static bool has_edp_a(struct drm_device *dev)
7904{
7905 struct drm_i915_private *dev_priv = dev->dev_private;
7906
7907 if (!IS_MOBILE(dev))
7908 return false;
7909
7910 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7911 return false;
7912
7913 if (IS_GEN5(dev) &&
7914 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7915 return false;
7916
7917 return true;
7918}
7919
79e53945
JB
7920static void intel_setup_outputs(struct drm_device *dev)
7921{
725e30ad 7922 struct drm_i915_private *dev_priv = dev->dev_private;
4ef69c7a 7923 struct intel_encoder *encoder;
cb0953d7 7924 bool dpd_is_edp = false;
f3cfcba6 7925 bool has_lvds;
79e53945 7926
f3cfcba6 7927 has_lvds = intel_lvds_init(dev);
c5d1b51d
CW
7928 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
7929 /* disable the panel fitter on everything but LVDS */
7930 I915_WRITE(PFIT_CONTROL, 0);
7931 }
79e53945 7932
affa9354 7933 if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
79935fca 7934 intel_crt_init(dev);
cb0953d7 7935
affa9354 7936 if (HAS_DDI(dev)) {
0e72a5b5
ED
7937 int found;
7938
7939 /* Haswell uses DDI functions to detect digital outputs */
7940 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
7941 /* DDI A only supports eDP */
7942 if (found)
7943 intel_ddi_init(dev, PORT_A);
7944
7945 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
7946 * register */
7947 found = I915_READ(SFUSE_STRAP);
7948
7949 if (found & SFUSE_STRAP_DDIB_DETECTED)
7950 intel_ddi_init(dev, PORT_B);
7951 if (found & SFUSE_STRAP_DDIC_DETECTED)
7952 intel_ddi_init(dev, PORT_C);
7953 if (found & SFUSE_STRAP_DDID_DETECTED)
7954 intel_ddi_init(dev, PORT_D);
7955 } else if (HAS_PCH_SPLIT(dev)) {
cb0953d7 7956 int found;
270b3042
DV
7957 dpd_is_edp = intel_dpd_is_edp(dev);
7958
7959 if (has_edp_a(dev))
7960 intel_dp_init(dev, DP_A, PORT_A);
cb0953d7 7961
30ad48b7 7962 if (I915_READ(HDMIB) & PORT_DETECTED) {
461ed3ca 7963 /* PCH SDVOB multiplex with HDMIB */
eef4eacb 7964 found = intel_sdvo_init(dev, PCH_SDVOB, true);
30ad48b7 7965 if (!found)
08d644ad 7966 intel_hdmi_init(dev, HDMIB, PORT_B);
5eb08b69 7967 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
ab9d7c30 7968 intel_dp_init(dev, PCH_DP_B, PORT_B);
30ad48b7
ZW
7969 }
7970
7971 if (I915_READ(HDMIC) & PORT_DETECTED)
08d644ad 7972 intel_hdmi_init(dev, HDMIC, PORT_C);
30ad48b7 7973
b708a1d5 7974 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
08d644ad 7975 intel_hdmi_init(dev, HDMID, PORT_D);
30ad48b7 7976
5eb08b69 7977 if (I915_READ(PCH_DP_C) & DP_DETECTED)
ab9d7c30 7978 intel_dp_init(dev, PCH_DP_C, PORT_C);
5eb08b69 7979
270b3042 7980 if (I915_READ(PCH_DP_D) & DP_DETECTED)
ab9d7c30 7981 intel_dp_init(dev, PCH_DP_D, PORT_D);
4a87d65d
JB
7982 } else if (IS_VALLEYVIEW(dev)) {
7983 int found;
7984
19c03924
GB
7985 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
7986 if (I915_READ(DP_C) & DP_DETECTED)
7987 intel_dp_init(dev, DP_C, PORT_C);
7988
4a87d65d
JB
7989 if (I915_READ(SDVOB) & PORT_DETECTED) {
7990 /* SDVOB multiplex with HDMIB */
7991 found = intel_sdvo_init(dev, SDVOB, true);
7992 if (!found)
08d644ad 7993 intel_hdmi_init(dev, SDVOB, PORT_B);
4a87d65d 7994 if (!found && (I915_READ(DP_B) & DP_DETECTED))
ab9d7c30 7995 intel_dp_init(dev, DP_B, PORT_B);
4a87d65d
JB
7996 }
7997
7998 if (I915_READ(SDVOC) & PORT_DETECTED)
08d644ad 7999 intel_hdmi_init(dev, SDVOC, PORT_C);
5eb08b69 8000
103a196f 8001 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 8002 bool found = false;
7d57382e 8003
725e30ad 8004 if (I915_READ(SDVOB) & SDVO_DETECTED) {
b01f2c3a 8005 DRM_DEBUG_KMS("probing SDVOB\n");
eef4eacb 8006 found = intel_sdvo_init(dev, SDVOB, true);
b01f2c3a
JB
8007 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8008 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
08d644ad 8009 intel_hdmi_init(dev, SDVOB, PORT_B);
b01f2c3a 8010 }
27185ae1 8011
b01f2c3a
JB
8012 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
8013 DRM_DEBUG_KMS("probing DP_B\n");
ab9d7c30 8014 intel_dp_init(dev, DP_B, PORT_B);
b01f2c3a 8015 }
725e30ad 8016 }
13520b05
KH
8017
8018 /* Before G4X, SDVOC doesn't have its own detect register */
13520b05 8019
b01f2c3a
JB
8020 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8021 DRM_DEBUG_KMS("probing SDVOC\n");
eef4eacb 8022 found = intel_sdvo_init(dev, SDVOC, false);
b01f2c3a 8023 }
27185ae1
ML
8024
8025 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
8026
b01f2c3a
JB
8027 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8028 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
08d644ad 8029 intel_hdmi_init(dev, SDVOC, PORT_C);
b01f2c3a
JB
8030 }
8031 if (SUPPORTS_INTEGRATED_DP(dev)) {
8032 DRM_DEBUG_KMS("probing DP_C\n");
ab9d7c30 8033 intel_dp_init(dev, DP_C, PORT_C);
b01f2c3a 8034 }
725e30ad 8035 }
27185ae1 8036
b01f2c3a
JB
8037 if (SUPPORTS_INTEGRATED_DP(dev) &&
8038 (I915_READ(DP_D) & DP_DETECTED)) {
8039 DRM_DEBUG_KMS("probing DP_D\n");
ab9d7c30 8040 intel_dp_init(dev, DP_D, PORT_D);
b01f2c3a 8041 }
bad720ff 8042 } else if (IS_GEN2(dev))
79e53945
JB
8043 intel_dvo_init(dev);
8044
103a196f 8045 if (SUPPORTS_TV(dev))
79e53945
JB
8046 intel_tv_init(dev);
8047
4ef69c7a
CW
8048 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8049 encoder->base.possible_crtcs = encoder->crtc_mask;
8050 encoder->base.possible_clones =
66a9278e 8051 intel_encoder_clones(encoder);
79e53945 8052 }
47356eb6 8053
40579abe 8054 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
9fb526db 8055 ironlake_init_pch_refclk(dev);
270b3042
DV
8056
8057 drm_helper_move_panel_connectors_to_head(dev);
79e53945
JB
8058}
8059
8060static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
8061{
8062 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945
JB
8063
8064 drm_framebuffer_cleanup(fb);
05394f39 8065 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
79e53945
JB
8066
8067 kfree(intel_fb);
8068}
8069
8070static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 8071 struct drm_file *file,
79e53945
JB
8072 unsigned int *handle)
8073{
8074 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 8075 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 8076
05394f39 8077 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
8078}
8079
8080static const struct drm_framebuffer_funcs intel_fb_funcs = {
8081 .destroy = intel_user_framebuffer_destroy,
8082 .create_handle = intel_user_framebuffer_create_handle,
8083};
8084
38651674
DA
8085int intel_framebuffer_init(struct drm_device *dev,
8086 struct intel_framebuffer *intel_fb,
308e5bcb 8087 struct drm_mode_fb_cmd2 *mode_cmd,
05394f39 8088 struct drm_i915_gem_object *obj)
79e53945 8089{
79e53945
JB
8090 int ret;
8091
05394f39 8092 if (obj->tiling_mode == I915_TILING_Y)
57cd6508
CW
8093 return -EINVAL;
8094
308e5bcb 8095 if (mode_cmd->pitches[0] & 63)
57cd6508
CW
8096 return -EINVAL;
8097
5d7bd705
VS
8098 /* FIXME <= Gen4 stride limits are bit unclear */
8099 if (mode_cmd->pitches[0] > 32768)
8100 return -EINVAL;
8101
8102 if (obj->tiling_mode != I915_TILING_NONE &&
8103 mode_cmd->pitches[0] != obj->stride)
8104 return -EINVAL;
8105
57779d06 8106 /* Reject formats not supported by any plane early. */
308e5bcb 8107 switch (mode_cmd->pixel_format) {
57779d06 8108 case DRM_FORMAT_C8:
04b3924d
VS
8109 case DRM_FORMAT_RGB565:
8110 case DRM_FORMAT_XRGB8888:
8111 case DRM_FORMAT_ARGB8888:
57779d06
VS
8112 break;
8113 case DRM_FORMAT_XRGB1555:
8114 case DRM_FORMAT_ARGB1555:
8115 if (INTEL_INFO(dev)->gen > 3)
8116 return -EINVAL;
8117 break;
8118 case DRM_FORMAT_XBGR8888:
8119 case DRM_FORMAT_ABGR8888:
04b3924d
VS
8120 case DRM_FORMAT_XRGB2101010:
8121 case DRM_FORMAT_ARGB2101010:
57779d06
VS
8122 case DRM_FORMAT_XBGR2101010:
8123 case DRM_FORMAT_ABGR2101010:
8124 if (INTEL_INFO(dev)->gen < 4)
8125 return -EINVAL;
b5626747 8126 break;
04b3924d
VS
8127 case DRM_FORMAT_YUYV:
8128 case DRM_FORMAT_UYVY:
8129 case DRM_FORMAT_YVYU:
8130 case DRM_FORMAT_VYUY:
57779d06
VS
8131 if (INTEL_INFO(dev)->gen < 6)
8132 return -EINVAL;
57cd6508
CW
8133 break;
8134 default:
57779d06 8135 DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
57cd6508
CW
8136 return -EINVAL;
8137 }
8138
90f9a336
VS
8139 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
8140 if (mode_cmd->offsets[0] != 0)
8141 return -EINVAL;
8142
79e53945
JB
8143 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8144 if (ret) {
8145 DRM_ERROR("framebuffer init failed %d\n", ret);
8146 return ret;
8147 }
8148
8149 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
79e53945 8150 intel_fb->obj = obj;
79e53945
JB
8151 return 0;
8152}
8153
79e53945
JB
8154static struct drm_framebuffer *
8155intel_user_framebuffer_create(struct drm_device *dev,
8156 struct drm_file *filp,
308e5bcb 8157 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 8158{
05394f39 8159 struct drm_i915_gem_object *obj;
79e53945 8160
308e5bcb
JB
8161 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8162 mode_cmd->handles[0]));
c8725226 8163 if (&obj->base == NULL)
cce13ff7 8164 return ERR_PTR(-ENOENT);
79e53945 8165
d2dff872 8166 return intel_framebuffer_create(dev, mode_cmd, obj);
79e53945
JB
8167}
8168
79e53945 8169static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 8170 .fb_create = intel_user_framebuffer_create,
eb1f8e4f 8171 .output_poll_changed = intel_fb_output_poll_changed,
79e53945
JB
8172};
8173
e70236a8
JB
8174/* Set up chip specific display functions */
8175static void intel_init_display(struct drm_device *dev)
8176{
8177 struct drm_i915_private *dev_priv = dev->dev_private;
8178
8179 /* We always want a DPMS function */
affa9354 8180 if (HAS_DDI(dev)) {
09b4ddf9 8181 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
4f771f10
PZ
8182 dev_priv->display.crtc_enable = haswell_crtc_enable;
8183 dev_priv->display.crtc_disable = haswell_crtc_disable;
6441ab5f 8184 dev_priv->display.off = haswell_crtc_off;
09b4ddf9
PZ
8185 dev_priv->display.update_plane = ironlake_update_plane;
8186 } else if (HAS_PCH_SPLIT(dev)) {
f564048e 8187 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
76e5a89c
DV
8188 dev_priv->display.crtc_enable = ironlake_crtc_enable;
8189 dev_priv->display.crtc_disable = ironlake_crtc_disable;
ee7b9f93 8190 dev_priv->display.off = ironlake_crtc_off;
17638cd6 8191 dev_priv->display.update_plane = ironlake_update_plane;
f564048e 8192 } else {
f564048e 8193 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
76e5a89c
DV
8194 dev_priv->display.crtc_enable = i9xx_crtc_enable;
8195 dev_priv->display.crtc_disable = i9xx_crtc_disable;
ee7b9f93 8196 dev_priv->display.off = i9xx_crtc_off;
17638cd6 8197 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 8198 }
e70236a8 8199
e70236a8 8200 /* Returns the core display clock speed */
25eb05fc
JB
8201 if (IS_VALLEYVIEW(dev))
8202 dev_priv->display.get_display_clock_speed =
8203 valleyview_get_display_clock_speed;
8204 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
e70236a8
JB
8205 dev_priv->display.get_display_clock_speed =
8206 i945_get_display_clock_speed;
8207 else if (IS_I915G(dev))
8208 dev_priv->display.get_display_clock_speed =
8209 i915_get_display_clock_speed;
f2b115e6 8210 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
e70236a8
JB
8211 dev_priv->display.get_display_clock_speed =
8212 i9xx_misc_get_display_clock_speed;
8213 else if (IS_I915GM(dev))
8214 dev_priv->display.get_display_clock_speed =
8215 i915gm_get_display_clock_speed;
8216 else if (IS_I865G(dev))
8217 dev_priv->display.get_display_clock_speed =
8218 i865_get_display_clock_speed;
f0f8a9ce 8219 else if (IS_I85X(dev))
e70236a8
JB
8220 dev_priv->display.get_display_clock_speed =
8221 i855_get_display_clock_speed;
8222 else /* 852, 830 */
8223 dev_priv->display.get_display_clock_speed =
8224 i830_get_display_clock_speed;
8225
7f8a8569 8226 if (HAS_PCH_SPLIT(dev)) {
f00a3ddf 8227 if (IS_GEN5(dev)) {
674cf967 8228 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
e0dac65e 8229 dev_priv->display.write_eld = ironlake_write_eld;
1398261a 8230 } else if (IS_GEN6(dev)) {
674cf967 8231 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
e0dac65e 8232 dev_priv->display.write_eld = ironlake_write_eld;
357555c0
JB
8233 } else if (IS_IVYBRIDGE(dev)) {
8234 /* FIXME: detect B0+ stepping and use auto training */
8235 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
e0dac65e 8236 dev_priv->display.write_eld = ironlake_write_eld;
01a415fd
DV
8237 dev_priv->display.modeset_global_resources =
8238 ivb_modeset_global_resources;
c82e4d26
ED
8239 } else if (IS_HASWELL(dev)) {
8240 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
83358c85 8241 dev_priv->display.write_eld = haswell_write_eld;
7f8a8569
ZW
8242 } else
8243 dev_priv->display.update_wm = NULL;
6067aaea 8244 } else if (IS_G4X(dev)) {
e0dac65e 8245 dev_priv->display.write_eld = g4x_write_eld;
e70236a8 8246 }
8c9f3aaf
JB
8247
8248 /* Default just returns -ENODEV to indicate unsupported */
8249 dev_priv->display.queue_flip = intel_default_queue_flip;
8250
8251 switch (INTEL_INFO(dev)->gen) {
8252 case 2:
8253 dev_priv->display.queue_flip = intel_gen2_queue_flip;
8254 break;
8255
8256 case 3:
8257 dev_priv->display.queue_flip = intel_gen3_queue_flip;
8258 break;
8259
8260 case 4:
8261 case 5:
8262 dev_priv->display.queue_flip = intel_gen4_queue_flip;
8263 break;
8264
8265 case 6:
8266 dev_priv->display.queue_flip = intel_gen6_queue_flip;
8267 break;
7c9017e5
JB
8268 case 7:
8269 dev_priv->display.queue_flip = intel_gen7_queue_flip;
8270 break;
8c9f3aaf 8271 }
e70236a8
JB
8272}
8273
b690e96c
JB
8274/*
8275 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8276 * resume, or other times. This quirk makes sure that's the case for
8277 * affected systems.
8278 */
0206e353 8279static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
8280{
8281 struct drm_i915_private *dev_priv = dev->dev_private;
8282
8283 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 8284 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
8285}
8286
435793df
KP
8287/*
8288 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8289 */
8290static void quirk_ssc_force_disable(struct drm_device *dev)
8291{
8292 struct drm_i915_private *dev_priv = dev->dev_private;
8293 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 8294 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
8295}
8296
4dca20ef 8297/*
5a15ab5b
CE
8298 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
8299 * brightness value
4dca20ef
CE
8300 */
8301static void quirk_invert_brightness(struct drm_device *dev)
8302{
8303 struct drm_i915_private *dev_priv = dev->dev_private;
8304 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 8305 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
8306}
8307
b690e96c
JB
8308struct intel_quirk {
8309 int device;
8310 int subsystem_vendor;
8311 int subsystem_device;
8312 void (*hook)(struct drm_device *dev);
8313};
8314
5f85f176
EE
8315/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
8316struct intel_dmi_quirk {
8317 void (*hook)(struct drm_device *dev);
8318 const struct dmi_system_id (*dmi_id_list)[];
8319};
8320
8321static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
8322{
8323 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
8324 return 1;
8325}
8326
8327static const struct intel_dmi_quirk intel_dmi_quirks[] = {
8328 {
8329 .dmi_id_list = &(const struct dmi_system_id[]) {
8330 {
8331 .callback = intel_dmi_reverse_brightness,
8332 .ident = "NCR Corporation",
8333 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
8334 DMI_MATCH(DMI_PRODUCT_NAME, ""),
8335 },
8336 },
8337 { } /* terminating entry */
8338 },
8339 .hook = quirk_invert_brightness,
8340 },
8341};
8342
c43b5634 8343static struct intel_quirk intel_quirks[] = {
b690e96c 8344 /* HP Mini needs pipe A force quirk (LP: #322104) */
0206e353 8345 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
b690e96c 8346
b690e96c
JB
8347 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8348 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
8349
b690e96c
JB
8350 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8351 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
8352
ccd0d36e 8353 /* 830/845 need to leave pipe A & dpll A up */
b690e96c 8354 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
dcdaed6e 8355 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
435793df
KP
8356
8357 /* Lenovo U160 cannot use SSC on LVDS */
8358 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
070d329a
MAS
8359
8360 /* Sony Vaio Y cannot use SSC on LVDS */
8361 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
5a15ab5b
CE
8362
8363 /* Acer Aspire 5734Z must invert backlight brightness */
8364 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
b690e96c
JB
8365};
8366
8367static void intel_init_quirks(struct drm_device *dev)
8368{
8369 struct pci_dev *d = dev->pdev;
8370 int i;
8371
8372 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8373 struct intel_quirk *q = &intel_quirks[i];
8374
8375 if (d->device == q->device &&
8376 (d->subsystem_vendor == q->subsystem_vendor ||
8377 q->subsystem_vendor == PCI_ANY_ID) &&
8378 (d->subsystem_device == q->subsystem_device ||
8379 q->subsystem_device == PCI_ANY_ID))
8380 q->hook(dev);
8381 }
5f85f176
EE
8382 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
8383 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
8384 intel_dmi_quirks[i].hook(dev);
8385 }
b690e96c
JB
8386}
8387
9cce37f4
JB
8388/* Disable the VGA plane that we never use */
8389static void i915_disable_vga(struct drm_device *dev)
8390{
8391 struct drm_i915_private *dev_priv = dev->dev_private;
8392 u8 sr1;
8393 u32 vga_reg;
8394
8395 if (HAS_PCH_SPLIT(dev))
8396 vga_reg = CPU_VGACNTRL;
8397 else
8398 vga_reg = VGACNTRL;
8399
8400 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 8401 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
8402 sr1 = inb(VGA_SR_DATA);
8403 outb(sr1 | 1<<5, VGA_SR_DATA);
8404 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
8405 udelay(300);
8406
8407 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
8408 POSTING_READ(vga_reg);
8409}
8410
f817586c
DV
8411void intel_modeset_init_hw(struct drm_device *dev)
8412{
0232e927
ED
 8413	/* We attempt to init the necessary power wells early during
 8414	 * initialization, so the subsystems that expect power to be enabled can work.
 8415	 */
8416 intel_init_power_wells(dev);
8417
a8f78b58
ED
8418 intel_prepare_ddi(dev);
8419
f817586c
DV
8420 intel_init_clock_gating(dev);
8421
79f5b2c7 8422 mutex_lock(&dev->struct_mutex);
8090c6b9 8423 intel_enable_gt_powersave(dev);
79f5b2c7 8424 mutex_unlock(&dev->struct_mutex);
f817586c
DV
8425}
8426
79e53945
JB
8427void intel_modeset_init(struct drm_device *dev)
8428{
652c393a 8429 struct drm_i915_private *dev_priv = dev->dev_private;
b840d907 8430 int i, ret;
79e53945
JB
8431
8432 drm_mode_config_init(dev);
8433
8434 dev->mode_config.min_width = 0;
8435 dev->mode_config.min_height = 0;
8436
019d96cb
DA
8437 dev->mode_config.preferred_depth = 24;
8438 dev->mode_config.prefer_shadow = 1;
8439
e6ecefaa 8440 dev->mode_config.funcs = &intel_mode_funcs;
79e53945 8441
b690e96c
JB
8442 intel_init_quirks(dev);
8443
1fa61106
ED
8444 intel_init_pm(dev);
8445
e70236a8
JB
8446 intel_init_display(dev);
8447
a6c45cf0
CW
8448 if (IS_GEN2(dev)) {
8449 dev->mode_config.max_width = 2048;
8450 dev->mode_config.max_height = 2048;
8451 } else if (IS_GEN3(dev)) {
5e4d6fa7
KP
8452 dev->mode_config.max_width = 4096;
8453 dev->mode_config.max_height = 4096;
79e53945 8454 } else {
a6c45cf0
CW
8455 dev->mode_config.max_width = 8192;
8456 dev->mode_config.max_height = 8192;
79e53945 8457 }
dd2757f8 8458 dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
79e53945 8459
28c97730 8460 DRM_DEBUG_KMS("%d display pipe%s available.\n",
a3524f1b 8461 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
79e53945 8462
a3524f1b 8463 for (i = 0; i < dev_priv->num_pipe; i++) {
79e53945 8464 intel_crtc_init(dev, i);
00c2064b
JB
8465 ret = intel_plane_init(dev, i);
8466 if (ret)
8467 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
79e53945
JB
8468 }
8469
79f689aa 8470 intel_cpu_pll_init(dev);
ee7b9f93
JB
8471 intel_pch_pll_init(dev);
8472
9cce37f4
JB
8473 /* Just disable it once at startup */
8474 i915_disable_vga(dev);
79e53945 8475 intel_setup_outputs(dev);
11be49eb
CW
8476
8477 /* Just in case the BIOS is doing something questionable. */
8478 intel_disable_fbc(dev);
2c7111db
CW
8479}
8480
24929352
DV
8481static void
8482intel_connector_break_all_links(struct intel_connector *connector)
8483{
8484 connector->base.dpms = DRM_MODE_DPMS_OFF;
8485 connector->base.encoder = NULL;
8486 connector->encoder->connectors_active = false;
8487 connector->encoder->base.crtc = NULL;
8488}
8489
7fad798e
DV
8490static void intel_enable_pipe_a(struct drm_device *dev)
8491{
8492 struct intel_connector *connector;
8493 struct drm_connector *crt = NULL;
8494 struct intel_load_detect_pipe load_detect_temp;
8495
 8496	/* We can't just switch on pipe A; we need to set things up with a
 8497	 * proper mode and output configuration. As a gross hack, enable pipe A
 8498	 * by enabling the load detect pipe once. */
8499 list_for_each_entry(connector,
8500 &dev->mode_config.connector_list,
8501 base.head) {
8502 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8503 crt = &connector->base;
8504 break;
8505 }
8506 }
8507
8508 if (!crt)
8509 return;
8510
8511 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8512 intel_release_load_detect_pipe(crt, &load_detect_temp);
8513
652c393a 8514
7fad798e
DV
8515}
8516
fa555837
DV
8517static bool
8518intel_check_plane_mapping(struct intel_crtc *crtc)
8519{
8520 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8521 u32 reg, val;
8522
8523 if (dev_priv->num_pipe == 1)
8524 return true;
8525
8526 reg = DSPCNTR(!crtc->plane);
8527 val = I915_READ(reg);
8528
8529 if ((val & DISPLAY_PLANE_ENABLE) &&
8530 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8531 return false;
8532
8533 return true;
8534}
8535
24929352
DV
8536static void intel_sanitize_crtc(struct intel_crtc *crtc)
8537{
8538 struct drm_device *dev = crtc->base.dev;
8539 struct drm_i915_private *dev_priv = dev->dev_private;
fa555837 8540 u32 reg;
24929352 8541
24929352 8543	/* Clear any frame start delays (used for debugging) left over by the BIOS */
702e7a56 8543 reg = PIPECONF(crtc->cpu_transcoder);
24929352
DV
8544 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8545
8546 /* We need to sanitize the plane -> pipe mapping first because this will
fa555837
DV
8547 * disable the crtc (and hence change the state) if it is wrong. Note
8548 * that gen4+ has a fixed plane -> pipe mapping. */
8549 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
24929352
DV
8550 struct intel_connector *connector;
8551 bool plane;
8552
24929352
DV
8553 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8554 crtc->base.base.id);
8555
8556 /* Pipe has the wrong plane attached and the plane is active.
8557 * Temporarily change the plane mapping and disable everything
8558 * ... */
8559 plane = crtc->plane;
8560 crtc->plane = !plane;
8561 dev_priv->display.crtc_disable(&crtc->base);
8562 crtc->plane = plane;
8563
8564 /* ... and break all links. */
8565 list_for_each_entry(connector, &dev->mode_config.connector_list,
8566 base.head) {
8567 if (connector->encoder->base.crtc != &crtc->base)
8568 continue;
8569
8570 intel_connector_break_all_links(connector);
8571 }
8572
8573 WARN_ON(crtc->active);
8574 crtc->base.enabled = false;
8575 }
24929352 8576
7fad798e
DV
8577 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8578 crtc->pipe == PIPE_A && !crtc->active) {
 8579		/* The BIOS forgot to enable pipe A; this mostly happens after
 8580		 * resume. Force-enable the pipe to fix it: the update_dpms
 8581		 * call below will restore the pipe to the right state, but leave
 8582		 * the required bits on. */
8583 intel_enable_pipe_a(dev);
8584 }
8585
24929352
DV
8586 /* Adjust the state of the output pipe according to whether we
8587 * have active connectors/encoders. */
8588 intel_crtc_update_dpms(&crtc->base);
8589
8590 if (crtc->active != crtc->base.enabled) {
8591 struct intel_encoder *encoder;
8592
8593 /* This can happen either due to bugs in the get_hw_state
8594 * functions or because the pipe is force-enabled due to the
8595 * pipe A quirk. */
8596 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
8597 crtc->base.base.id,
8598 crtc->base.enabled ? "enabled" : "disabled",
8599 crtc->active ? "enabled" : "disabled");
8600
8601 crtc->base.enabled = crtc->active;
8602
 8603		/* Because we only establish the connector -> encoder ->
 8604		 * crtc links if something is active, this means the
 8605		 * crtc is now deactivated. Break the links. Connector
 8606		 * -> encoder links are only established when things are
 8607		 * actually up, hence no need to break them. */
8608 WARN_ON(crtc->active);
8609
8610 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
8611 WARN_ON(encoder->connectors_active);
8612 encoder->base.crtc = NULL;
8613 }
8614 }
8615}
8616
8617static void intel_sanitize_encoder(struct intel_encoder *encoder)
8618{
8619 struct intel_connector *connector;
8620 struct drm_device *dev = encoder->base.dev;
8621
8622 /* We need to check both for a crtc link (meaning that the
8623 * encoder is active and trying to read from a pipe) and the
8624 * pipe itself being active. */
8625 bool has_active_crtc = encoder->base.crtc &&
8626 to_intel_crtc(encoder->base.crtc)->active;
8627
8628 if (encoder->connectors_active && !has_active_crtc) {
8629 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
8630 encoder->base.base.id,
8631 drm_get_encoder_name(&encoder->base));
8632
8633 /* Connector is active, but has no active pipe. This is
8634 * fallout from our resume register restoring. Disable
8635 * the encoder manually again. */
8636 if (encoder->base.crtc) {
8637 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
8638 encoder->base.base.id,
8639 drm_get_encoder_name(&encoder->base));
8640 encoder->disable(encoder);
8641 }
8642
 8643		/* Inconsistent output/port/pipe state presumably happens due to
 8644		 * a bug in one of the get_hw_state functions, or someplace else
 8645		 * in our code, like the register restore mess on resume. Clamp
 8646		 * things to off as a safer default. */
8647 list_for_each_entry(connector,
8648 &dev->mode_config.connector_list,
8649 base.head) {
8650 if (connector->encoder != encoder)
8651 continue;
8652
8653 intel_connector_break_all_links(connector);
8654 }
8655 }
8656 /* Enabled encoders without active connectors will be fixed in
8657 * the crtc fixup. */
8658}
8659
 8660/* Scans out the current hw modeset state, sanitizes it and maps it into the drm
 8661 * and i915 state tracking structures. */
45e2b5f6
DV
8662void intel_modeset_setup_hw_state(struct drm_device *dev,
8663 bool force_restore)
24929352
DV
8664{
8665 struct drm_i915_private *dev_priv = dev->dev_private;
8666 enum pipe pipe;
8667 u32 tmp;
8668 struct intel_crtc *crtc;
8669 struct intel_encoder *encoder;
8670 struct intel_connector *connector;
8671
affa9354 8672 if (HAS_DDI(dev)) {
e28d54cb
PZ
8673 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8674
8675 if (tmp & TRANS_DDI_FUNC_ENABLE) {
8676 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8677 case TRANS_DDI_EDP_INPUT_A_ON:
8678 case TRANS_DDI_EDP_INPUT_A_ONOFF:
8679 pipe = PIPE_A;
8680 break;
8681 case TRANS_DDI_EDP_INPUT_B_ONOFF:
8682 pipe = PIPE_B;
8683 break;
8684 case TRANS_DDI_EDP_INPUT_C_ONOFF:
8685 pipe = PIPE_C;
8686 break;
8687 }
8688
8689 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8690 crtc->cpu_transcoder = TRANSCODER_EDP;
8691
8692 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
8693 pipe_name(pipe));
8694 }
8695 }
8696
24929352
DV
8697 for_each_pipe(pipe) {
8698 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8699
702e7a56 8700 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
24929352
DV
8701 if (tmp & PIPECONF_ENABLE)
8702 crtc->active = true;
8703 else
8704 crtc->active = false;
8705
8706 crtc->base.enabled = crtc->active;
8707
8708 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
8709 crtc->base.base.id,
8710 crtc->active ? "enabled" : "disabled");
8711 }
8712
affa9354 8713 if (HAS_DDI(dev))
6441ab5f
PZ
8714 intel_ddi_setup_hw_pll_state(dev);
8715
24929352
DV
8716 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8717 base.head) {
8718 pipe = 0;
8719
8720 if (encoder->get_hw_state(encoder, &pipe)) {
8721 encoder->base.crtc =
8722 dev_priv->pipe_to_crtc_mapping[pipe];
8723 } else {
8724 encoder->base.crtc = NULL;
8725 }
8726
8727 encoder->connectors_active = false;
8728 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
8729 encoder->base.base.id,
8730 drm_get_encoder_name(&encoder->base),
8731 encoder->base.crtc ? "enabled" : "disabled",
8732 pipe);
8733 }
8734
8735 list_for_each_entry(connector, &dev->mode_config.connector_list,
8736 base.head) {
8737 if (connector->get_hw_state(connector)) {
8738 connector->base.dpms = DRM_MODE_DPMS_ON;
8739 connector->encoder->connectors_active = true;
8740 connector->base.encoder = &connector->encoder->base;
8741 } else {
8742 connector->base.dpms = DRM_MODE_DPMS_OFF;
8743 connector->base.encoder = NULL;
8744 }
8745 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
8746 connector->base.base.id,
8747 drm_get_connector_name(&connector->base),
8748 connector->base.encoder ? "enabled" : "disabled");
8749 }
8750
8751 /* HW state is read out, now we need to sanitize this mess. */
8752 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8753 base.head) {
8754 intel_sanitize_encoder(encoder);
8755 }
8756
8757 for_each_pipe(pipe) {
8758 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8759 intel_sanitize_crtc(crtc);
8760 }
9a935856 8761
45e2b5f6
DV
8762 if (force_restore) {
8763 for_each_pipe(pipe) {
8764 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8765 intel_set_mode(&crtc->base, &crtc->base.mode,
8766 crtc->base.x, crtc->base.y, crtc->base.fb);
8767 }
8768 } else {
8769 intel_modeset_update_staged_output_state(dev);
8770 }
8af6cf88
DV
8771
8772 intel_modeset_check_state(dev);
2e938892
DV
8773
8774 drm_mode_config_reset(dev);
2c7111db
CW
8775}
8776
8777void intel_modeset_gem_init(struct drm_device *dev)
8778{
1833b134 8779 intel_modeset_init_hw(dev);
02e792fb
DV
8780
8781 intel_setup_overlay(dev);
24929352 8782
45e2b5f6 8783 intel_modeset_setup_hw_state(dev, false);
79e53945
JB
8784}
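/*
 * Editor's illustrative sketch, not part of the original file: the assumed
 * driver-load ordering around the entry points above -- intel_modeset_init()
 * runs before GEM is brought up, intel_modeset_gem_init() afterwards so that
 * the hw state readout sees the final configuration.  The caller context and
 * error label are placeholders.
 */
#if 0
	intel_modeset_init(dev);		/* quirks, pm, crtcs, outputs */

	ret = i915_gem_init(dev);		/* GEM/GTT bring-up (assumed caller) */
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);		/* hw init + hw state readout */
#endif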
8785
8786void intel_modeset_cleanup(struct drm_device *dev)
8787{
652c393a
JB
8788 struct drm_i915_private *dev_priv = dev->dev_private;
8789 struct drm_crtc *crtc;
8790 struct intel_crtc *intel_crtc;
8791
f87ea761 8792 drm_kms_helper_poll_fini(dev);
652c393a
JB
8793 mutex_lock(&dev->struct_mutex);
8794
723bfd70
JB
8795 intel_unregister_dsm_handler();
8796
8797
652c393a
JB
8798 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8799 /* Skip inactive CRTCs */
8800 if (!crtc->fb)
8801 continue;
8802
8803 intel_crtc = to_intel_crtc(crtc);
3dec0095 8804 intel_increase_pllclock(crtc);
652c393a
JB
8805 }
8806
973d04f9 8807 intel_disable_fbc(dev);
e70236a8 8808
8090c6b9 8809 intel_disable_gt_powersave(dev);
0cdab21f 8810
930ebb46
DV
8811 ironlake_teardown_rc6(dev);
8812
57f350b6
JB
8813 if (IS_VALLEYVIEW(dev))
8814 vlv_init_dpio(dev);
8815
69341a5e
KH
8816 mutex_unlock(&dev->struct_mutex);
8817
6c0d9350
DV
 8818	/* Disable the irq before mode object teardown, as the irq might
 8819	 * enqueue unpin/hotplug work. */
8820 drm_irq_uninstall(dev);
8821 cancel_work_sync(&dev_priv->hotplug_work);
c6a828d3 8822 cancel_work_sync(&dev_priv->rps.work);
6c0d9350 8823
1630fe75
CW
8824 /* flush any delayed tasks or pending work */
8825 flush_scheduled_work();
8826
79e53945
JB
8827 drm_mode_config_cleanup(dev);
8828}
8829
f1c79df3
ZW
 8830/*
 8831 * Return which encoder is currently attached to the connector.
 8832 */
df0e9248 8833struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 8834{
df0e9248
CW
8835 return &intel_attached_encoder(connector)->base;
8836}
f1c79df3 8837
df0e9248
CW
8838void intel_connector_attach_encoder(struct intel_connector *connector,
8839 struct intel_encoder *encoder)
8840{
8841 connector->encoder = encoder;
8842 drm_mode_connector_attach_encoder(&connector->base,
8843 &encoder->base);
79e53945 8844}
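/*
 * Editor's illustrative sketch, not part of the original file: the typical
 * call sequence in an output init path that ends up here.  The
 * intel_foo_connector_funcs/intel_foo_connector_helper_funcs names are
 * hypothetical stand-ins for the per-output tables.
 */
#if 0
	drm_connector_init(dev, &intel_connector->base,
			   &intel_foo_connector_funcs,
			   DRM_MODE_CONNECTOR_Unknown);
	drm_connector_helper_add(&intel_connector->base,
				 &intel_foo_connector_helper_funcs);
	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(&intel_connector->base);
#endif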
28d52043
DA
8845
8846/*
8847 * set vga decode state - true == enable VGA decode
8848 */
8849int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
8850{
8851 struct drm_i915_private *dev_priv = dev->dev_private;
8852 u16 gmch_ctrl;
8853
8854 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
8855 if (state)
8856 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
8857 else
8858 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
8859 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
8860 return 0;
8861}
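/*
 * Editor's illustrative sketch, not part of the original file: the VGA
 * arbiter decode callback registered elsewhere in the driver is expected to
 * use this helper roughly as below -- flip legacy VGA decode on or off and
 * report which resources the device still decodes.
 */
#if 0
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
#endif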
c4a1d9e4
CW
8862
8863#ifdef CONFIG_DEBUG_FS
8864#include <linux/seq_file.h>
8865
8866struct intel_display_error_state {
8867 struct intel_cursor_error_state {
8868 u32 control;
8869 u32 position;
8870 u32 base;
8871 u32 size;
52331309 8872 } cursor[I915_MAX_PIPES];
c4a1d9e4
CW
8873
8874 struct intel_pipe_error_state {
8875 u32 conf;
8876 u32 source;
8877
8878 u32 htotal;
8879 u32 hblank;
8880 u32 hsync;
8881 u32 vtotal;
8882 u32 vblank;
8883 u32 vsync;
52331309 8884 } pipe[I915_MAX_PIPES];
c4a1d9e4
CW
8885
8886 struct intel_plane_error_state {
8887 u32 control;
8888 u32 stride;
8889 u32 size;
8890 u32 pos;
8891 u32 addr;
8892 u32 surface;
8893 u32 tile_offset;
52331309 8894 } plane[I915_MAX_PIPES];
c4a1d9e4
CW
8895};
8896
8897struct intel_display_error_state *
8898intel_display_capture_error_state(struct drm_device *dev)
8899{
0206e353 8900 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4 8901 struct intel_display_error_state *error;
702e7a56 8902 enum transcoder cpu_transcoder;
c4a1d9e4
CW
8903 int i;
8904
8905 error = kmalloc(sizeof(*error), GFP_ATOMIC);
8906 if (error == NULL)
8907 return NULL;
8908
52331309 8909 for_each_pipe(i) {
702e7a56
PZ
8910 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
8911
c4a1d9e4
CW
8912 error->cursor[i].control = I915_READ(CURCNTR(i));
8913 error->cursor[i].position = I915_READ(CURPOS(i));
8914 error->cursor[i].base = I915_READ(CURBASE(i));
8915
8916 error->plane[i].control = I915_READ(DSPCNTR(i));
8917 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
8918 error->plane[i].size = I915_READ(DSPSIZE(i));
0206e353 8919 error->plane[i].pos = I915_READ(DSPPOS(i));
c4a1d9e4
CW
8920 error->plane[i].addr = I915_READ(DSPADDR(i));
8921 if (INTEL_INFO(dev)->gen >= 4) {
8922 error->plane[i].surface = I915_READ(DSPSURF(i));
8923 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
8924 }
8925
702e7a56 8926 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
c4a1d9e4 8927 error->pipe[i].source = I915_READ(PIPESRC(i));
fe2b8f9d
PZ
8928 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
8929 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
8930 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
8931 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
8932 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
8933 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
c4a1d9e4
CW
8934 }
8935
8936 return error;
8937}
8938
8939void
8940intel_display_print_error_state(struct seq_file *m,
8941 struct drm_device *dev,
8942 struct intel_display_error_state *error)
8943{
52331309 8944 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4
CW
8945 int i;
8946
52331309
DL
8947 seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
8948 for_each_pipe(i) {
c4a1d9e4
CW
8949 seq_printf(m, "Pipe [%d]:\n", i);
8950 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
8951 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
8952 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
8953 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
8954 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
8955 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
8956 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
8957 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
8958
8959 seq_printf(m, "Plane [%d]:\n", i);
8960 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
8961 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
8962 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
8963 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
8964 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
8965 if (INTEL_INFO(dev)->gen >= 4) {
8966 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
8967 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
8968 }
8969
8970 seq_printf(m, "Cursor [%d]:\n", i);
8971 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
8972 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
8973 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
8974 }
8975}
8976#endif
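/*
 * Editor's illustrative sketch, not part of the original file: how the two
 * error-state helpers above are meant to be paired -- capture from the GPU
 * hang path (atomic context, hence the GFP_ATOMIC allocation above), print
 * from the debugfs error-state dumper.  The "error->display" field and the
 * seq_file "m" are assumed from the surrounding error-capture code.
 */
#if 0
	/* in the error capture path */
	error->display = intel_display_capture_error_state(dev);

	/* in the debugfs error-state dumper */
	if (error->display)
		intel_display_print_error_state(m, dev, error->display);
#endif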