drm/i915: Support pf CRC source on haswell transcoder edp
drivers/gpu/drm/i915/intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_rect.h>
44 #include <linux/dma_remapping.h>
45
46 /* Primary plane formats supported by all gen */
47 #define COMMON_PRIMARY_FORMATS \
48         DRM_FORMAT_C8, \
49         DRM_FORMAT_RGB565, \
50         DRM_FORMAT_XRGB8888, \
51         DRM_FORMAT_ARGB8888
52
53 /* Primary plane formats for gen <= 3 */
54 static const uint32_t intel_primary_formats_gen2[] = {
55         COMMON_PRIMARY_FORMATS,
56         DRM_FORMAT_XRGB1555,
57         DRM_FORMAT_ARGB1555,
58 };
59
60 /* Primary plane formats for gen >= 4 */
61 static const uint32_t intel_primary_formats_gen4[] = {
62         COMMON_PRIMARY_FORMATS,
63         DRM_FORMAT_XBGR8888,
64         DRM_FORMAT_ABGR8888,
65         DRM_FORMAT_XRGB2101010,
66         DRM_FORMAT_ARGB2101010,
67         DRM_FORMAT_XBGR2101010,
68         DRM_FORMAT_ABGR2101010,
69 };
70
71 /* Cursor formats */
72 static const uint32_t intel_cursor_formats[] = {
73         DRM_FORMAT_ARGB8888,
74 };
75
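/*
 * Local round-to-nearest 64-bit divide: half the divisor is added before
 * do_div(), so e.g. DIV_ROUND_CLOSEST_ULL(7, 2) evaluates to 4.
 */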
76 #define DIV_ROUND_CLOSEST_ULL(ll, d)    \
77 ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
78
79 static void intel_increase_pllclock(struct drm_device *dev,
80                                     enum pipe pipe);
81 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
82
83 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
84                                 struct intel_crtc_config *pipe_config);
85 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
86                                    struct intel_crtc_config *pipe_config);
87
88 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
89                           int x, int y, struct drm_framebuffer *old_fb);
90 static int intel_framebuffer_init(struct drm_device *dev,
91                                   struct intel_framebuffer *ifb,
92                                   struct drm_mode_fb_cmd2 *mode_cmd,
93                                   struct drm_i915_gem_object *obj);
94 static void intel_dp_set_m_n(struct intel_crtc *crtc);
95 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
96 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
97 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
98                                          struct intel_link_m_n *m_n);
99 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
100 static void haswell_set_pipeconf(struct drm_crtc *crtc);
101 static void intel_set_pipe_csc(struct drm_crtc *crtc);
102 static void vlv_prepare_pll(struct intel_crtc *crtc);
103
104 typedef struct {
105         int     min, max;
106 } intel_range_t;
107
108 typedef struct {
109         int     dot_limit;
110         int     p2_slow, p2_fast;
111 } intel_p2_t;
112
113 typedef struct intel_limit intel_limit_t;
114 struct intel_limit {
115         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
116         intel_p2_t          p2;
117 };
118
119 int
120 intel_pch_rawclk(struct drm_device *dev)
121 {
122         struct drm_i915_private *dev_priv = dev->dev_private;
123
124         WARN_ON(!HAS_PCH_SPLIT(dev));
125
126         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
127 }
128
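/*
 * Gen5 reads back the BIOS-programmed FDI PLL feedback divider (+2);
 * otherwise a fixed 27 is returned, i.e. the 2.7 GHz FDI link rate.
 */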
129 static inline u32 /* units of 100MHz */
130 intel_fdi_link_freq(struct drm_device *dev)
131 {
132         if (IS_GEN5(dev)) {
133                 struct drm_i915_private *dev_priv = dev->dev_private;
134                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
135         } else
136                 return 27;
137 }
138
139 static const intel_limit_t intel_limits_i8xx_dac = {
140         .dot = { .min = 25000, .max = 350000 },
141         .vco = { .min = 908000, .max = 1512000 },
142         .n = { .min = 2, .max = 16 },
143         .m = { .min = 96, .max = 140 },
144         .m1 = { .min = 18, .max = 26 },
145         .m2 = { .min = 6, .max = 16 },
146         .p = { .min = 4, .max = 128 },
147         .p1 = { .min = 2, .max = 33 },
148         .p2 = { .dot_limit = 165000,
149                 .p2_slow = 4, .p2_fast = 2 },
150 };
151
152 static const intel_limit_t intel_limits_i8xx_dvo = {
153         .dot = { .min = 25000, .max = 350000 },
154         .vco = { .min = 908000, .max = 1512000 },
155         .n = { .min = 2, .max = 16 },
156         .m = { .min = 96, .max = 140 },
157         .m1 = { .min = 18, .max = 26 },
158         .m2 = { .min = 6, .max = 16 },
159         .p = { .min = 4, .max = 128 },
160         .p1 = { .min = 2, .max = 33 },
161         .p2 = { .dot_limit = 165000,
162                 .p2_slow = 4, .p2_fast = 4 },
163 };
164
165 static const intel_limit_t intel_limits_i8xx_lvds = {
166         .dot = { .min = 25000, .max = 350000 },
167         .vco = { .min = 908000, .max = 1512000 },
168         .n = { .min = 2, .max = 16 },
169         .m = { .min = 96, .max = 140 },
170         .m1 = { .min = 18, .max = 26 },
171         .m2 = { .min = 6, .max = 16 },
172         .p = { .min = 4, .max = 128 },
173         .p1 = { .min = 1, .max = 6 },
174         .p2 = { .dot_limit = 165000,
175                 .p2_slow = 14, .p2_fast = 7 },
176 };
177
178 static const intel_limit_t intel_limits_i9xx_sdvo = {
179         .dot = { .min = 20000, .max = 400000 },
180         .vco = { .min = 1400000, .max = 2800000 },
181         .n = { .min = 1, .max = 6 },
182         .m = { .min = 70, .max = 120 },
183         .m1 = { .min = 8, .max = 18 },
184         .m2 = { .min = 3, .max = 7 },
185         .p = { .min = 5, .max = 80 },
186         .p1 = { .min = 1, .max = 8 },
187         .p2 = { .dot_limit = 200000,
188                 .p2_slow = 10, .p2_fast = 5 },
189 };
190
191 static const intel_limit_t intel_limits_i9xx_lvds = {
192         .dot = { .min = 20000, .max = 400000 },
193         .vco = { .min = 1400000, .max = 2800000 },
194         .n = { .min = 1, .max = 6 },
195         .m = { .min = 70, .max = 120 },
196         .m1 = { .min = 8, .max = 18 },
197         .m2 = { .min = 3, .max = 7 },
198         .p = { .min = 7, .max = 98 },
199         .p1 = { .min = 1, .max = 8 },
200         .p2 = { .dot_limit = 112000,
201                 .p2_slow = 14, .p2_fast = 7 },
202 };
203
204
205 static const intel_limit_t intel_limits_g4x_sdvo = {
206         .dot = { .min = 25000, .max = 270000 },
207         .vco = { .min = 1750000, .max = 3500000},
208         .n = { .min = 1, .max = 4 },
209         .m = { .min = 104, .max = 138 },
210         .m1 = { .min = 17, .max = 23 },
211         .m2 = { .min = 5, .max = 11 },
212         .p = { .min = 10, .max = 30 },
213         .p1 = { .min = 1, .max = 3},
214         .p2 = { .dot_limit = 270000,
215                 .p2_slow = 10,
216                 .p2_fast = 10
217         },
218 };
219
220 static const intel_limit_t intel_limits_g4x_hdmi = {
221         .dot = { .min = 22000, .max = 400000 },
222         .vco = { .min = 1750000, .max = 3500000},
223         .n = { .min = 1, .max = 4 },
224         .m = { .min = 104, .max = 138 },
225         .m1 = { .min = 16, .max = 23 },
226         .m2 = { .min = 5, .max = 11 },
227         .p = { .min = 5, .max = 80 },
228         .p1 = { .min = 1, .max = 8},
229         .p2 = { .dot_limit = 165000,
230                 .p2_slow = 10, .p2_fast = 5 },
231 };
232
233 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
234         .dot = { .min = 20000, .max = 115000 },
235         .vco = { .min = 1750000, .max = 3500000 },
236         .n = { .min = 1, .max = 3 },
237         .m = { .min = 104, .max = 138 },
238         .m1 = { .min = 17, .max = 23 },
239         .m2 = { .min = 5, .max = 11 },
240         .p = { .min = 28, .max = 112 },
241         .p1 = { .min = 2, .max = 8 },
242         .p2 = { .dot_limit = 0,
243                 .p2_slow = 14, .p2_fast = 14
244         },
245 };
246
247 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
248         .dot = { .min = 80000, .max = 224000 },
249         .vco = { .min = 1750000, .max = 3500000 },
250         .n = { .min = 1, .max = 3 },
251         .m = { .min = 104, .max = 138 },
252         .m1 = { .min = 17, .max = 23 },
253         .m2 = { .min = 5, .max = 11 },
254         .p = { .min = 14, .max = 42 },
255         .p1 = { .min = 2, .max = 6 },
256         .p2 = { .dot_limit = 0,
257                 .p2_slow = 7, .p2_fast = 7
258         },
259 };
260
261 static const intel_limit_t intel_limits_pineview_sdvo = {
262         .dot = { .min = 20000, .max = 400000},
263         .vco = { .min = 1700000, .max = 3500000 },
264         /* Pineview's N counter is a ring counter */
265         .n = { .min = 3, .max = 6 },
266         .m = { .min = 2, .max = 256 },
267         /* Pineview only has one combined m divider, which we treat as m2. */
268         .m1 = { .min = 0, .max = 0 },
269         .m2 = { .min = 0, .max = 254 },
270         .p = { .min = 5, .max = 80 },
271         .p1 = { .min = 1, .max = 8 },
272         .p2 = { .dot_limit = 200000,
273                 .p2_slow = 10, .p2_fast = 5 },
274 };
275
276 static const intel_limit_t intel_limits_pineview_lvds = {
277         .dot = { .min = 20000, .max = 400000 },
278         .vco = { .min = 1700000, .max = 3500000 },
279         .n = { .min = 3, .max = 6 },
280         .m = { .min = 2, .max = 256 },
281         .m1 = { .min = 0, .max = 0 },
282         .m2 = { .min = 0, .max = 254 },
283         .p = { .min = 7, .max = 112 },
284         .p1 = { .min = 1, .max = 8 },
285         .p2 = { .dot_limit = 112000,
286                 .p2_slow = 14, .p2_fast = 14 },
287 };
288
289 /* Ironlake / Sandybridge
290  *
291  * We calculate clock using (register_value + 2) for N/M1/M2, so here
292  * the range value for them is (actual_value - 2).
293  */
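/*
 * For example, the .m1 = { .min = 12, .max = 22 } limits below correspond
 * to actual M1 divider values of 14..24 once the +2 encoding is applied.
 */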
294 static const intel_limit_t intel_limits_ironlake_dac = {
295         .dot = { .min = 25000, .max = 350000 },
296         .vco = { .min = 1760000, .max = 3510000 },
297         .n = { .min = 1, .max = 5 },
298         .m = { .min = 79, .max = 127 },
299         .m1 = { .min = 12, .max = 22 },
300         .m2 = { .min = 5, .max = 9 },
301         .p = { .min = 5, .max = 80 },
302         .p1 = { .min = 1, .max = 8 },
303         .p2 = { .dot_limit = 225000,
304                 .p2_slow = 10, .p2_fast = 5 },
305 };
306
307 static const intel_limit_t intel_limits_ironlake_single_lvds = {
308         .dot = { .min = 25000, .max = 350000 },
309         .vco = { .min = 1760000, .max = 3510000 },
310         .n = { .min = 1, .max = 3 },
311         .m = { .min = 79, .max = 118 },
312         .m1 = { .min = 12, .max = 22 },
313         .m2 = { .min = 5, .max = 9 },
314         .p = { .min = 28, .max = 112 },
315         .p1 = { .min = 2, .max = 8 },
316         .p2 = { .dot_limit = 225000,
317                 .p2_slow = 14, .p2_fast = 14 },
318 };
319
320 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
321         .dot = { .min = 25000, .max = 350000 },
322         .vco = { .min = 1760000, .max = 3510000 },
323         .n = { .min = 1, .max = 3 },
324         .m = { .min = 79, .max = 127 },
325         .m1 = { .min = 12, .max = 22 },
326         .m2 = { .min = 5, .max = 9 },
327         .p = { .min = 14, .max = 56 },
328         .p1 = { .min = 2, .max = 8 },
329         .p2 = { .dot_limit = 225000,
330                 .p2_slow = 7, .p2_fast = 7 },
331 };
332
333 /* LVDS 100MHz refclk limits. */
334 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
335         .dot = { .min = 25000, .max = 350000 },
336         .vco = { .min = 1760000, .max = 3510000 },
337         .n = { .min = 1, .max = 2 },
338         .m = { .min = 79, .max = 126 },
339         .m1 = { .min = 12, .max = 22 },
340         .m2 = { .min = 5, .max = 9 },
341         .p = { .min = 28, .max = 112 },
342         .p1 = { .min = 2, .max = 8 },
343         .p2 = { .dot_limit = 225000,
344                 .p2_slow = 14, .p2_fast = 14 },
345 };
346
347 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
348         .dot = { .min = 25000, .max = 350000 },
349         .vco = { .min = 1760000, .max = 3510000 },
350         .n = { .min = 1, .max = 3 },
351         .m = { .min = 79, .max = 126 },
352         .m1 = { .min = 12, .max = 22 },
353         .m2 = { .min = 5, .max = 9 },
354         .p = { .min = 14, .max = 42 },
355         .p1 = { .min = 2, .max = 6 },
356         .p2 = { .dot_limit = 225000,
357                 .p2_slow = 7, .p2_fast = 7 },
358 };
359
360 static const intel_limit_t intel_limits_vlv = {
361          /*
362           * These are the data rate limits (measured in fast clocks)
363           * since those are the strictest limits we have. The fast
364           * clock and actual rate limits are more relaxed, so checking
365           * them would make no difference.
366           */
367         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
368         .vco = { .min = 4000000, .max = 6000000 },
369         .n = { .min = 1, .max = 7 },
370         .m1 = { .min = 2, .max = 3 },
371         .m2 = { .min = 11, .max = 156 },
372         .p1 = { .min = 2, .max = 3 },
373         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
374 };
375
376 static const intel_limit_t intel_limits_chv = {
377         /*
378          * These are the data rate limits (measured in fast clocks)
379          * since those are the strictest limits we have.  The fast
380          * clock and actual rate limits are more relaxed, so checking
381          * them would make no difference.
382          */
383         .dot = { .min = 25000 * 5, .max = 540000 * 5},
384         .vco = { .min = 4860000, .max = 6700000 },
385         .n = { .min = 1, .max = 1 },
386         .m1 = { .min = 2, .max = 2 },
387         .m2 = { .min = 24 << 22, .max = 175 << 22 },
388         .p1 = { .min = 2, .max = 4 },
389         .p2 = { .p2_slow = 1, .p2_fast = 14 },
390 };
391
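/*
 * The VLV/CHV PLL code works in "fast clock" units, i.e. 5x the pixel
 * clock: the .dot limits above are given in fast-clock units, and
 * vlv/chv_find_best_dpll() scale their pixel-clock target by 5 before
 * searching.
 */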
392 static void vlv_clock(int refclk, intel_clock_t *clock)
393 {
394         clock->m = clock->m1 * clock->m2;
395         clock->p = clock->p1 * clock->p2;
396         if (WARN_ON(clock->n == 0 || clock->p == 0))
397                 return;
398         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
399         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
400 }
401
402 /**
403  * Returns whether any output on the specified pipe is of the specified type
404  */
405 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
406 {
407         struct drm_device *dev = crtc->dev;
408         struct intel_encoder *encoder;
409
410         for_each_encoder_on_crtc(dev, crtc, encoder)
411                 if (encoder->type == type)
412                         return true;
413
414         return false;
415 }
416
417 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
418                                                 int refclk)
419 {
420         struct drm_device *dev = crtc->dev;
421         const intel_limit_t *limit;
422
423         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
424                 if (intel_is_dual_link_lvds(dev)) {
425                         if (refclk == 100000)
426                                 limit = &intel_limits_ironlake_dual_lvds_100m;
427                         else
428                                 limit = &intel_limits_ironlake_dual_lvds;
429                 } else {
430                         if (refclk == 100000)
431                                 limit = &intel_limits_ironlake_single_lvds_100m;
432                         else
433                                 limit = &intel_limits_ironlake_single_lvds;
434                 }
435         } else
436                 limit = &intel_limits_ironlake_dac;
437
438         return limit;
439 }
440
441 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
442 {
443         struct drm_device *dev = crtc->dev;
444         const intel_limit_t *limit;
445
446         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
447                 if (intel_is_dual_link_lvds(dev))
448                         limit = &intel_limits_g4x_dual_channel_lvds;
449                 else
450                         limit = &intel_limits_g4x_single_channel_lvds;
451         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
452                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
453                 limit = &intel_limits_g4x_hdmi;
454         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
455                 limit = &intel_limits_g4x_sdvo;
456         } else /* The option is for other outputs */
457                 limit = &intel_limits_i9xx_sdvo;
458
459         return limit;
460 }
461
462 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
463 {
464         struct drm_device *dev = crtc->dev;
465         const intel_limit_t *limit;
466
467         if (HAS_PCH_SPLIT(dev))
468                 limit = intel_ironlake_limit(crtc, refclk);
469         else if (IS_G4X(dev)) {
470                 limit = intel_g4x_limit(crtc);
471         } else if (IS_PINEVIEW(dev)) {
472                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
473                         limit = &intel_limits_pineview_lvds;
474                 else
475                         limit = &intel_limits_pineview_sdvo;
476         } else if (IS_CHERRYVIEW(dev)) {
477                 limit = &intel_limits_chv;
478         } else if (IS_VALLEYVIEW(dev)) {
479                 limit = &intel_limits_vlv;
480         } else if (!IS_GEN2(dev)) {
481                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
482                         limit = &intel_limits_i9xx_lvds;
483                 else
484                         limit = &intel_limits_i9xx_sdvo;
485         } else {
486                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
487                         limit = &intel_limits_i8xx_lvds;
488                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
489                         limit = &intel_limits_i8xx_dvo;
490                 else
491                         limit = &intel_limits_i8xx_dac;
492         }
493         return limit;
494 }
495
496 /* m1 is reserved as 0 in Pineview, n is a ring counter */
497 static void pineview_clock(int refclk, intel_clock_t *clock)
498 {
499         clock->m = clock->m2 + 2;
500         clock->p = clock->p1 * clock->p2;
501         if (WARN_ON(clock->n == 0 || clock->p == 0))
502                 return;
503         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
504         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
505 }
506
507 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
508 {
509         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
510 }
511
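/*
 * Rough worked example of the formulas below (hypothetical values):
 * refclk = 96000 kHz, n = 1, m1 = 12, m2 = 5, p1 = 4, p2 = 5 gives
 * m = 5 * (12 + 2) + (5 + 2) = 77, vco = 96000 * 77 / (1 + 2) = 2464000 kHz,
 * dot = 2464000 / (4 * 5) = 123200 kHz.
 */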
512 static void i9xx_clock(int refclk, intel_clock_t *clock)
513 {
514         clock->m = i9xx_dpll_compute_m(clock);
515         clock->p = clock->p1 * clock->p2;
516         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
517                 return;
518         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
519         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
520 }
521
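/*
 * On CHV the m2 divider is a fixed-point value with 22 fractional bits
 * (note the << 22 scaling in intel_limits_chv above), hence the
 * n << 22 divisor below.
 */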
522 static void chv_clock(int refclk, intel_clock_t *clock)
523 {
524         clock->m = clock->m1 * clock->m2;
525         clock->p = clock->p1 * clock->p2;
526         if (WARN_ON(clock->n == 0 || clock->p == 0))
527                 return;
528         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
529                         clock->n << 22);
530         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
531 }
532
533 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
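/* Note: INTELPllInvalid() expands to a 'return false' in the caller. */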
534 /**
535  * Returns whether the given set of divisors is valid for a given refclk with
536  * the given connectors.
537  */
538
539 static bool intel_PLL_is_valid(struct drm_device *dev,
540                                const intel_limit_t *limit,
541                                const intel_clock_t *clock)
542 {
543         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
544                 INTELPllInvalid("n out of range\n");
545         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
546                 INTELPllInvalid("p1 out of range\n");
547         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
548                 INTELPllInvalid("m2 out of range\n");
549         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
550                 INTELPllInvalid("m1 out of range\n");
551
552         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
553                 if (clock->m1 <= clock->m2)
554                         INTELPllInvalid("m1 <= m2\n");
555
556         if (!IS_VALLEYVIEW(dev)) {
557                 if (clock->p < limit->p.min || limit->p.max < clock->p)
558                         INTELPllInvalid("p out of range\n");
559                 if (clock->m < limit->m.min || limit->m.max < clock->m)
560                         INTELPllInvalid("m out of range\n");
561         }
562
563         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
564                 INTELPllInvalid("vco out of range\n");
565         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
566          * connector, etc., rather than just a single range.
567          */
568         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
569                 INTELPllInvalid("dot out of range\n");
570
571         return true;
572 }
573
574 static bool
575 i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
576                     int target, int refclk, intel_clock_t *match_clock,
577                     intel_clock_t *best_clock)
578 {
579         struct drm_device *dev = crtc->dev;
580         intel_clock_t clock;
581         int err = target;
582
583         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
584                 /*
585                  * For LVDS just rely on its current settings for dual-channel.
586                  * We haven't figured out how to reliably set up different
587                  * single/dual channel state, if we even can.
588                  */
589                 if (intel_is_dual_link_lvds(dev))
590                         clock.p2 = limit->p2.p2_fast;
591                 else
592                         clock.p2 = limit->p2.p2_slow;
593         } else {
594                 if (target < limit->p2.dot_limit)
595                         clock.p2 = limit->p2.p2_slow;
596                 else
597                         clock.p2 = limit->p2.p2_fast;
598         }
599
600         memset(best_clock, 0, sizeof(*best_clock));
601
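        /*
         * Exhaustively walk the m1/m2/n/p1 space (with m2 < m1 enforced below)
         * and keep the divisors whose dot clock lands closest to the target.
         */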
602         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
603              clock.m1++) {
604                 for (clock.m2 = limit->m2.min;
605                      clock.m2 <= limit->m2.max; clock.m2++) {
606                         if (clock.m2 >= clock.m1)
607                                 break;
608                         for (clock.n = limit->n.min;
609                              clock.n <= limit->n.max; clock.n++) {
610                                 for (clock.p1 = limit->p1.min;
611                                         clock.p1 <= limit->p1.max; clock.p1++) {
612                                         int this_err;
613
614                                         i9xx_clock(refclk, &clock);
615                                         if (!intel_PLL_is_valid(dev, limit,
616                                                                 &clock))
617                                                 continue;
618                                         if (match_clock &&
619                                             clock.p != match_clock->p)
620                                                 continue;
621
622                                         this_err = abs(clock.dot - target);
623                                         if (this_err < err) {
624                                                 *best_clock = clock;
625                                                 err = this_err;
626                                         }
627                                 }
628                         }
629                 }
630         }
631
632         return (err != target);
633 }
634
635 static bool
636 pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
637                    int target, int refclk, intel_clock_t *match_clock,
638                    intel_clock_t *best_clock)
639 {
640         struct drm_device *dev = crtc->dev;
641         intel_clock_t clock;
642         int err = target;
643
644         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
645                 /*
646                  * For LVDS just rely on its current settings for dual-channel.
647                  * We haven't figured out how to reliably set up different
648                  * single/dual channel state, if we even can.
649                  */
650                 if (intel_is_dual_link_lvds(dev))
651                         clock.p2 = limit->p2.p2_fast;
652                 else
653                         clock.p2 = limit->p2.p2_slow;
654         } else {
655                 if (target < limit->p2.dot_limit)
656                         clock.p2 = limit->p2.p2_slow;
657                 else
658                         clock.p2 = limit->p2.p2_fast;
659         }
660
661         memset(best_clock, 0, sizeof(*best_clock));
662
663         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
664              clock.m1++) {
665                 for (clock.m2 = limit->m2.min;
666                      clock.m2 <= limit->m2.max; clock.m2++) {
667                         for (clock.n = limit->n.min;
668                              clock.n <= limit->n.max; clock.n++) {
669                                 for (clock.p1 = limit->p1.min;
670                                         clock.p1 <= limit->p1.max; clock.p1++) {
671                                         int this_err;
672
673                                         pineview_clock(refclk, &clock);
674                                         if (!intel_PLL_is_valid(dev, limit,
675                                                                 &clock))
676                                                 continue;
677                                         if (match_clock &&
678                                             clock.p != match_clock->p)
679                                                 continue;
680
681                                         this_err = abs(clock.dot - target);
682                                         if (this_err < err) {
683                                                 *best_clock = clock;
684                                                 err = this_err;
685                                         }
686                                 }
687                         }
688                 }
689         }
690
691         return (err != target);
692 }
693
694 static bool
695 g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
696                    int target, int refclk, intel_clock_t *match_clock,
697                    intel_clock_t *best_clock)
698 {
699         struct drm_device *dev = crtc->dev;
700         intel_clock_t clock;
701         int max_n;
702         bool found;
703         /* approximately equals target * 0.00585 */
704         int err_most = (target >> 8) + (target >> 9);
705         found = false;
706
707         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
708                 if (intel_is_dual_link_lvds(dev))
709                         clock.p2 = limit->p2.p2_fast;
710                 else
711                         clock.p2 = limit->p2.p2_slow;
712         } else {
713                 if (target < limit->p2.dot_limit)
714                         clock.p2 = limit->p2.p2_slow;
715                 else
716                         clock.p2 = limit->p2.p2_fast;
717         }
718
719         memset(best_clock, 0, sizeof(*best_clock));
720         max_n = limit->n.max;
721         /* based on hardware requirement, prefer smaller n over precision */
722         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
723                 /* based on hardware requirement, prefer larger m1,m2 */
724                 for (clock.m1 = limit->m1.max;
725                      clock.m1 >= limit->m1.min; clock.m1--) {
726                         for (clock.m2 = limit->m2.max;
727                              clock.m2 >= limit->m2.min; clock.m2--) {
728                                 for (clock.p1 = limit->p1.max;
729                                      clock.p1 >= limit->p1.min; clock.p1--) {
730                                         int this_err;
731
732                                         i9xx_clock(refclk, &clock);
733                                         if (!intel_PLL_is_valid(dev, limit,
734                                                                 &clock))
735                                                 continue;
736
737                                         this_err = abs(clock.dot - target);
738                                         if (this_err < err_most) {
739                                                 *best_clock = clock;
740                                                 err_most = this_err;
741                                                 max_n = clock.n;
742                                                 found = true;
743                                         }
744                                 }
745                         }
746                 }
747         }
748         return found;
749 }
750
751 static bool
752 vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
753                    int target, int refclk, intel_clock_t *match_clock,
754                    intel_clock_t *best_clock)
755 {
756         struct drm_device *dev = crtc->dev;
757         intel_clock_t clock;
758         unsigned int bestppm = 1000000;
759         /* minimum update rate of 19.2 MHz limits n (refclk / n >= 19.2 MHz) */
760         int max_n = min(limit->n.max, refclk / 19200);
761         bool found = false;
762
763         target *= 5; /* fast clock */
764
765         memset(best_clock, 0, sizeof(*best_clock));
766
767         /* based on hardware requirement, prefer smaller n over precision */
768         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
769                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
770                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
771                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
772                                 clock.p = clock.p1 * clock.p2;
773                                 /* based on hardware requirement, prefer bigger m1,m2 values */
774                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
775                                         unsigned int ppm, diff;
776
777                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
778                                                                      refclk * clock.m1);
779
780                                         vlv_clock(refclk, &clock);
781
782                                         if (!intel_PLL_is_valid(dev, limit,
783                                                                 &clock))
784                                                 continue;
785
786                                         diff = abs(clock.dot - target);
787                                         ppm = div_u64(1000000ULL * diff, target);
788
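                                        /*
                                         * Prefer a larger post divider once the
                                         * error is within 100 ppm; otherwise only
                                         * take candidates that improve on the best
                                         * error by more than 10 ppm.
                                         */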
789                                         if (ppm < 100 && clock.p > best_clock->p) {
790                                                 bestppm = 0;
791                                                 *best_clock = clock;
792                                                 found = true;
793                                         }
794
795                                         if (bestppm >= 10 && ppm < bestppm - 10) {
796                                                 bestppm = ppm;
797                                                 *best_clock = clock;
798                                                 found = true;
799                                         }
800                                 }
801                         }
802                 }
803         }
804
805         return found;
806 }
807
808 static bool
809 chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
810                    int target, int refclk, intel_clock_t *match_clock,
811                    intel_clock_t *best_clock)
812 {
813         struct drm_device *dev = crtc->dev;
814         intel_clock_t clock;
815         uint64_t m2;
816         bool found = false;
817
818         memset(best_clock, 0, sizeof(*best_clock));
819
820         /*
821          * Based on the hardware doc, n is always set to 1 and m1 is always
822          * set to 2.  If we ever need to support a 200 MHz refclk, this must
823          * be revisited because n may no longer be 1.
824          */
825         clock.n = 1; clock.m1 = 2;
826         target *= 5;    /* fast clock */
827
828         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
829                 for (clock.p2 = limit->p2.p2_fast;
830                                 clock.p2 >= limit->p2.p2_slow;
831                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
832
833                         clock.p = clock.p1 * clock.p2;
834
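                        /*
                         * Solve the dot clock equation for m2 as a 22-bit
                         * fixed-point value:
                         * m2 = (target * p * n << 22) / (refclk * m1).
                         */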
835                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
836                                         clock.n) << 22, refclk * clock.m1);
837
838                         if (m2 > INT_MAX/clock.m1)
839                                 continue;
840
841                         clock.m2 = m2;
842
843                         chv_clock(refclk, &clock);
844
845                         if (!intel_PLL_is_valid(dev, limit, &clock))
846                                 continue;
847
848                         /* based on hardware requirement, prefer bigger p
849                          */
850                         if (clock.p > best_clock->p) {
851                                 *best_clock = clock;
852                                 found = true;
853                         }
854                 }
855         }
856
857         return found;
858 }
859
860 bool intel_crtc_active(struct drm_crtc *crtc)
861 {
862         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
863
864         /* Be paranoid as we can arrive here with only partial
865          * state retrieved from the hardware during setup.
866          *
867          * We can ditch the adjusted_mode.crtc_clock check as soon
868          * as Haswell has gained clock readout/fastboot support.
869          *
870          * We can ditch the crtc->primary->fb check as soon as we can
871          * properly reconstruct framebuffers.
872          */
873         return intel_crtc->active && crtc->primary->fb &&
874                 intel_crtc->config.adjusted_mode.crtc_clock;
875 }
876
877 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
878                                              enum pipe pipe)
879 {
880         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
881         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
882
883         return intel_crtc->config.cpu_transcoder;
884 }
885
886 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
887 {
888         struct drm_i915_private *dev_priv = dev->dev_private;
889         u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
890
891         frame = I915_READ(frame_reg);
892
893         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
894                 WARN(1, "vblank wait timed out\n");
895 }
896
897 /**
898  * intel_wait_for_vblank - wait for vblank on a given pipe
899  * @dev: drm device
900  * @pipe: pipe to wait for
901  *
902  * Wait for vblank to occur on a given pipe.  Needed for various bits of
903  * mode setting code.
904  */
905 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
906 {
907         struct drm_i915_private *dev_priv = dev->dev_private;
908         int pipestat_reg = PIPESTAT(pipe);
909
910         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
911                 g4x_wait_for_vblank(dev, pipe);
912                 return;
913         }
914
915         /* Clear existing vblank status. Note this will clear any other
916          * sticky status fields as well.
917          *
918          * This races with i915_driver_irq_handler() with the result
919          * that either function could miss a vblank event.  Here it is not
920          * fatal, as we will either wait upon the next vblank interrupt or
921          * timeout.  Generally speaking intel_wait_for_vblank() is only
922          * called during modeset at which time the GPU should be idle and
923          * should *not* be performing page flips and thus not waiting on
924          * vblanks...
925          * Currently, the result of us stealing a vblank from the irq
926          * handler is that a single frame will be skipped during swapbuffers.
927          */
928         I915_WRITE(pipestat_reg,
929                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
930
931         /* Wait for vblank interrupt bit to set */
932         if (wait_for(I915_READ(pipestat_reg) &
933                      PIPE_VBLANK_INTERRUPT_STATUS,
934                      50))
935                 DRM_DEBUG_KMS("vblank wait timed out\n");
936 }
937
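/*
 * Sample the pipe scanline counter twice, 5 ms apart; if it has not moved,
 * the pipe has effectively stopped scanning out.
 */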
938 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
939 {
940         struct drm_i915_private *dev_priv = dev->dev_private;
941         u32 reg = PIPEDSL(pipe);
942         u32 line1, line2;
943         u32 line_mask;
944
945         if (IS_GEN2(dev))
946                 line_mask = DSL_LINEMASK_GEN2;
947         else
948                 line_mask = DSL_LINEMASK_GEN3;
949
950         line1 = I915_READ(reg) & line_mask;
951         mdelay(5);
952         line2 = I915_READ(reg) & line_mask;
953
954         return line1 == line2;
955 }
956
957 /**
958  * intel_wait_for_pipe_off - wait for pipe to turn off
959  * @dev: drm device
960  * @pipe: pipe to wait for
961  *
962  * After disabling a pipe, we can't wait for vblank in the usual way,
963  * spinning on the vblank interrupt status bit, since we won't actually
964  * see an interrupt when the pipe is disabled.
965  *
966  * On Gen4 and above:
967  *   wait for the pipe register state bit to turn off
968  *
969  * Otherwise:
970  *   wait for the display line value to settle (it usually
971  *   ends up stopping at the start of the next frame).
972  *
973  */
974 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
975 {
976         struct drm_i915_private *dev_priv = dev->dev_private;
977         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
978                                                                       pipe);
979
980         if (INTEL_INFO(dev)->gen >= 4) {
981                 int reg = PIPECONF(cpu_transcoder);
982
983                 /* Wait for the Pipe State to go off */
984                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
985                              100))
986                         WARN(1, "pipe_off wait timed out\n");
987         } else {
988                 /* Wait for the display line to settle */
989                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
990                         WARN(1, "pipe_off wait timed out\n");
991         }
992 }
993
994 /**
995  * ibx_digital_port_connected - is the specified port connected?
996  * @dev_priv: i915 private structure
997  * @port: the port to test
998  *
999  * Returns true if @port is connected, false otherwise.
1000  */
1001 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1002                                 struct intel_digital_port *port)
1003 {
1004         u32 bit;
1005
1006         if (HAS_PCH_IBX(dev_priv->dev)) {
1007                 switch (port->port) {
1008                 case PORT_B:
1009                         bit = SDE_PORTB_HOTPLUG;
1010                         break;
1011                 case PORT_C:
1012                         bit = SDE_PORTC_HOTPLUG;
1013                         break;
1014                 case PORT_D:
1015                         bit = SDE_PORTD_HOTPLUG;
1016                         break;
1017                 default:
1018                         return true;
1019                 }
1020         } else {
1021                 switch (port->port) {
1022                 case PORT_B:
1023                         bit = SDE_PORTB_HOTPLUG_CPT;
1024                         break;
1025                 case PORT_C:
1026                         bit = SDE_PORTC_HOTPLUG_CPT;
1027                         break;
1028                 case PORT_D:
1029                         bit = SDE_PORTD_HOTPLUG_CPT;
1030                         break;
1031                 default:
1032                         return true;
1033                 }
1034         }
1035
1036         return I915_READ(SDEISR) & bit;
1037 }
1038
1039 static const char *state_string(bool enabled)
1040 {
1041         return enabled ? "on" : "off";
1042 }
1043
1044 /* Only for pre-ILK configs */
1045 void assert_pll(struct drm_i915_private *dev_priv,
1046                 enum pipe pipe, bool state)
1047 {
1048         int reg;
1049         u32 val;
1050         bool cur_state;
1051
1052         reg = DPLL(pipe);
1053         val = I915_READ(reg);
1054         cur_state = !!(val & DPLL_VCO_ENABLE);
1055         WARN(cur_state != state,
1056              "PLL state assertion failure (expected %s, current %s)\n",
1057              state_string(state), state_string(cur_state));
1058 }
1059
1060 /* XXX: the dsi pll is shared between MIPI DSI ports */
1061 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1062 {
1063         u32 val;
1064         bool cur_state;
1065
1066         mutex_lock(&dev_priv->dpio_lock);
1067         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1068         mutex_unlock(&dev_priv->dpio_lock);
1069
1070         cur_state = val & DSI_PLL_VCO_EN;
1071         WARN(cur_state != state,
1072              "DSI PLL state assertion failure (expected %s, current %s)\n",
1073              state_string(state), state_string(cur_state));
1074 }
1075 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1076 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1077
1078 struct intel_shared_dpll *
1079 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1080 {
1081         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1082
1083         if (crtc->config.shared_dpll < 0)
1084                 return NULL;
1085
1086         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1087 }
1088
1089 /* For ILK+ */
1090 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1091                         struct intel_shared_dpll *pll,
1092                         bool state)
1093 {
1094         bool cur_state;
1095         struct intel_dpll_hw_state hw_state;
1096
1097         if (HAS_PCH_LPT(dev_priv->dev)) {
1098                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1099                 return;
1100         }
1101
1102         if (WARN(!pll,
1103                  "asserting DPLL %s with no DPLL\n", state_string(state)))
1104                 return;
1105
1106         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1107         WARN(cur_state != state,
1108              "%s assertion failure (expected %s, current %s)\n",
1109              pll->name, state_string(state), state_string(cur_state));
1110 }
1111
1112 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1113                           enum pipe pipe, bool state)
1114 {
1115         int reg;
1116         u32 val;
1117         bool cur_state;
1118         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1119                                                                       pipe);
1120
1121         if (HAS_DDI(dev_priv->dev)) {
1122                 /* DDI does not have a specific FDI_TX register */
1123                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1124                 val = I915_READ(reg);
1125                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1126         } else {
1127                 reg = FDI_TX_CTL(pipe);
1128                 val = I915_READ(reg);
1129                 cur_state = !!(val & FDI_TX_ENABLE);
1130         }
1131         WARN(cur_state != state,
1132              "FDI TX state assertion failure (expected %s, current %s)\n",
1133              state_string(state), state_string(cur_state));
1134 }
1135 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1136 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1137
1138 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1139                           enum pipe pipe, bool state)
1140 {
1141         int reg;
1142         u32 val;
1143         bool cur_state;
1144
1145         reg = FDI_RX_CTL(pipe);
1146         val = I915_READ(reg);
1147         cur_state = !!(val & FDI_RX_ENABLE);
1148         WARN(cur_state != state,
1149              "FDI RX state assertion failure (expected %s, current %s)\n",
1150              state_string(state), state_string(cur_state));
1151 }
1152 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1153 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1154
1155 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1156                                       enum pipe pipe)
1157 {
1158         int reg;
1159         u32 val;
1160
1161         /* ILK FDI PLL is always enabled */
1162         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1163                 return;
1164
1165         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1166         if (HAS_DDI(dev_priv->dev))
1167                 return;
1168
1169         reg = FDI_TX_CTL(pipe);
1170         val = I915_READ(reg);
1171         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1172 }
1173
1174 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1175                        enum pipe pipe, bool state)
1176 {
1177         int reg;
1178         u32 val;
1179         bool cur_state;
1180
1181         reg = FDI_RX_CTL(pipe);
1182         val = I915_READ(reg);
1183         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1184         WARN(cur_state != state,
1185              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1186              state_string(state), state_string(cur_state));
1187 }
1188
1189 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1190                                   enum pipe pipe)
1191 {
1192         int pp_reg, lvds_reg;
1193         u32 val;
1194         enum pipe panel_pipe = PIPE_A;
1195         bool locked = true;
1196
1197         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1198                 pp_reg = PCH_PP_CONTROL;
1199                 lvds_reg = PCH_LVDS;
1200         } else {
1201                 pp_reg = PP_CONTROL;
1202                 lvds_reg = LVDS;
1203         }
1204
1205         val = I915_READ(pp_reg);
1206         if (!(val & PANEL_POWER_ON) ||
1207             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1208                 locked = false;
1209
1210         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1211                 panel_pipe = PIPE_B;
1212
1213         WARN(panel_pipe == pipe && locked,
1214              "panel assertion failure, pipe %c regs locked\n",
1215              pipe_name(pipe));
1216 }
1217
1218 static void assert_cursor(struct drm_i915_private *dev_priv,
1219                           enum pipe pipe, bool state)
1220 {
1221         struct drm_device *dev = dev_priv->dev;
1222         bool cur_state;
1223
1224         if (IS_845G(dev) || IS_I865G(dev))
1225                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1226         else
1227                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1228
1229         WARN(cur_state != state,
1230              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1231              pipe_name(pipe), state_string(state), state_string(cur_state));
1232 }
1233 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1234 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1235
1236 void assert_pipe(struct drm_i915_private *dev_priv,
1237                  enum pipe pipe, bool state)
1238 {
1239         int reg;
1240         u32 val;
1241         bool cur_state;
1242         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1243                                                                       pipe);
1244
1245         /* if we need the pipe A quirk it must be always on */
1246         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1247                 state = true;
1248
1249         if (!intel_display_power_enabled(dev_priv,
1250                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1251                 cur_state = false;
1252         } else {
1253                 reg = PIPECONF(cpu_transcoder);
1254                 val = I915_READ(reg);
1255                 cur_state = !!(val & PIPECONF_ENABLE);
1256         }
1257
1258         WARN(cur_state != state,
1259              "pipe %c assertion failure (expected %s, current %s)\n",
1260              pipe_name(pipe), state_string(state), state_string(cur_state));
1261 }
1262
1263 static void assert_plane(struct drm_i915_private *dev_priv,
1264                          enum plane plane, bool state)
1265 {
1266         int reg;
1267         u32 val;
1268         bool cur_state;
1269
1270         reg = DSPCNTR(plane);
1271         val = I915_READ(reg);
1272         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1273         WARN(cur_state != state,
1274              "plane %c assertion failure (expected %s, current %s)\n",
1275              plane_name(plane), state_string(state), state_string(cur_state));
1276 }
1277
1278 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1279 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1280
1281 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1282                                    enum pipe pipe)
1283 {
1284         struct drm_device *dev = dev_priv->dev;
1285         int reg, i;
1286         u32 val;
1287         int cur_pipe;
1288
1289         /* Primary planes are fixed to pipes on gen4+ */
1290         if (INTEL_INFO(dev)->gen >= 4) {
1291                 reg = DSPCNTR(pipe);
1292                 val = I915_READ(reg);
1293                 WARN(val & DISPLAY_PLANE_ENABLE,
1294                      "plane %c assertion failure, should be disabled but is not\n",
1295                      plane_name(pipe));
1296                 return;
1297         }
1298
1299         /* Need to check both planes against the pipe */
1300         for_each_pipe(i) {
1301                 reg = DSPCNTR(i);
1302                 val = I915_READ(reg);
1303                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1304                         DISPPLANE_SEL_PIPE_SHIFT;
1305                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1306                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1307                      plane_name(i), pipe_name(pipe));
1308         }
1309 }
1310
1311 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1312                                     enum pipe pipe)
1313 {
1314         struct drm_device *dev = dev_priv->dev;
1315         int reg, sprite;
1316         u32 val;
1317
1318         if (IS_VALLEYVIEW(dev)) {
1319                 for_each_sprite(pipe, sprite) {
1320                         reg = SPCNTR(pipe, sprite);
1321                         val = I915_READ(reg);
1322                         WARN(val & SP_ENABLE,
1323                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1324                              sprite_name(pipe, sprite), pipe_name(pipe));
1325                 }
1326         } else if (INTEL_INFO(dev)->gen >= 7) {
1327                 reg = SPRCTL(pipe);
1328                 val = I915_READ(reg);
1329                 WARN(val & SPRITE_ENABLE,
1330                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1331                      plane_name(pipe), pipe_name(pipe));
1332         } else if (INTEL_INFO(dev)->gen >= 5) {
1333                 reg = DVSCNTR(pipe);
1334                 val = I915_READ(reg);
1335                 WARN(val & DVS_ENABLE,
1336                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1337                      plane_name(pipe), pipe_name(pipe));
1338         }
1339 }
1340
1341 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1342 {
1343         u32 val;
1344         bool enabled;
1345
1346         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1347
1348         val = I915_READ(PCH_DREF_CONTROL);
1349         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1350                             DREF_SUPERSPREAD_SOURCE_MASK));
1351         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1352 }
1353
1354 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1355                                            enum pipe pipe)
1356 {
1357         int reg;
1358         u32 val;
1359         bool enabled;
1360
1361         reg = PCH_TRANSCONF(pipe);
1362         val = I915_READ(reg);
1363         enabled = !!(val & TRANS_ENABLE);
1364         WARN(enabled,
1365              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1366              pipe_name(pipe));
1367 }
1368
1369 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1370                             enum pipe pipe, u32 port_sel, u32 val)
1371 {
1372         if ((val & DP_PORT_EN) == 0)
1373                 return false;
1374
1375         if (HAS_PCH_CPT(dev_priv->dev)) {
1376                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1377                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1378                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1379                         return false;
1380         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1381                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1382                         return false;
1383         } else {
1384                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1385                         return false;
1386         }
1387         return true;
1388 }
1389
1390 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1391                               enum pipe pipe, u32 val)
1392 {
1393         if ((val & SDVO_ENABLE) == 0)
1394                 return false;
1395
1396         if (HAS_PCH_CPT(dev_priv->dev)) {
1397                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1398                         return false;
1399         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1400                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1401                         return false;
1402         } else {
1403                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1404                         return false;
1405         }
1406         return true;
1407 }
1408
1409 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1410                               enum pipe pipe, u32 val)
1411 {
1412         if ((val & LVDS_PORT_EN) == 0)
1413                 return false;
1414
1415         if (HAS_PCH_CPT(dev_priv->dev)) {
1416                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1417                         return false;
1418         } else {
1419                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1420                         return false;
1421         }
1422         return true;
1423 }
1424
1425 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1426                               enum pipe pipe, u32 val)
1427 {
1428         if ((val & ADPA_DAC_ENABLE) == 0)
1429                 return false;
1430         if (HAS_PCH_CPT(dev_priv->dev)) {
1431                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1432                         return false;
1433         } else {
1434                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1435                         return false;
1436         }
1437         return true;
1438 }
1439
1440 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1441                                    enum pipe pipe, int reg, u32 port_sel)
1442 {
1443         u32 val = I915_READ(reg);
1444         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1445              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1446              reg, pipe_name(pipe));
1447
1448         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1449              && (val & DP_PIPEB_SELECT),
1450              "IBX PCH dp port still using transcoder B\n");
1451 }
1452
1453 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1454                                      enum pipe pipe, int reg)
1455 {
1456         u32 val = I915_READ(reg);
1457         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1458              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1459              reg, pipe_name(pipe));
1460
1461         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1462              && (val & SDVO_PIPE_B_SELECT),
1463              "IBX PCH hdmi port still using transcoder B\n");
1464 }
1465
1466 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1467                                       enum pipe pipe)
1468 {
1469         int reg;
1470         u32 val;
1471
1472         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1473         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1474         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1475
1476         reg = PCH_ADPA;
1477         val = I915_READ(reg);
1478         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1479              "PCH VGA enabled on transcoder %c, should be disabled\n",
1480              pipe_name(pipe));
1481
1482         reg = PCH_LVDS;
1483         val = I915_READ(reg);
1484         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1485              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1486              pipe_name(pipe));
1487
1488         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1489         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1490         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1491 }
1492
1493 static void intel_init_dpio(struct drm_device *dev)
1494 {
1495         struct drm_i915_private *dev_priv = dev->dev_private;
1496
1497         if (!IS_VALLEYVIEW(dev))
1498                 return;
1499
1500         /*
1501          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1502          * CHV x1 PHY (DP/HDMI D)
1503          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1504          */
1505         if (IS_CHERRYVIEW(dev)) {
1506                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1507                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1508         } else {
1509                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1510         }
1511 }
1512
1513 static void intel_reset_dpio(struct drm_device *dev)
1514 {
1515         struct drm_i915_private *dev_priv = dev->dev_private;
1516
1517         if (IS_CHERRYVIEW(dev)) {
1518                 enum dpio_phy phy;
1519                 u32 val;
1520
1521                 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1522                         /* Poll for phypwrgood signal */
1523                         if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1524                                                 PHY_POWERGOOD(phy), 1))
1525                         DRM_ERROR("Display PHY %d did not power up\n", phy);
1526
1527                         /*
1528                          * Deassert common lane reset for PHY.
1529                          *
1530                          * This should only be done on init and resume from S3
1531                          * with both PLLs disabled, or we risk losing DPIO and
1532                          * PLL synchronization.
1533                          */
1534                         val = I915_READ(DISPLAY_PHY_CONTROL);
1535                         I915_WRITE(DISPLAY_PHY_CONTROL,
1536                                 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1537                 }
1538         }
1539 }
1540
1541 static void vlv_enable_pll(struct intel_crtc *crtc)
1542 {
1543         struct drm_device *dev = crtc->base.dev;
1544         struct drm_i915_private *dev_priv = dev->dev_private;
1545         int reg = DPLL(crtc->pipe);
1546         u32 dpll = crtc->config.dpll_hw_state.dpll;
1547
1548         assert_pipe_disabled(dev_priv, crtc->pipe);
1549
1550         /* No really, not for ILK+ */
1551         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1552
1553         /* PLL is protected by panel, make sure we can write it */
1554         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1555                 assert_panel_unlocked(dev_priv, crtc->pipe);
1556
1557         I915_WRITE(reg, dpll);
1558         POSTING_READ(reg);
1559         udelay(150);
1560
1561         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1562                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1563
1564         I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1565         POSTING_READ(DPLL_MD(crtc->pipe));
1566
1567         /* We do this three times for luck */
1568         I915_WRITE(reg, dpll);
1569         POSTING_READ(reg);
1570         udelay(150); /* wait for warmup */
1571         I915_WRITE(reg, dpll);
1572         POSTING_READ(reg);
1573         udelay(150); /* wait for warmup */
1574         I915_WRITE(reg, dpll);
1575         POSTING_READ(reg);
1576         udelay(150); /* wait for warmup */
1577 }
1578
1579 static void chv_enable_pll(struct intel_crtc *crtc)
1580 {
1581         struct drm_device *dev = crtc->base.dev;
1582         struct drm_i915_private *dev_priv = dev->dev_private;
1583         int pipe = crtc->pipe;
1584         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1585         u32 tmp;
1586
1587         assert_pipe_disabled(dev_priv, crtc->pipe);
1588
1589         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1590
1591         mutex_lock(&dev_priv->dpio_lock);
1592
1593         /* Re-enable the 10bit clock to the display controller */
1594         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1595         tmp |= DPIO_DCLKP_EN;
1596         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1597
1598         /*
1599          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1600          */
1601         udelay(1);
1602
1603         /* Enable PLL */
1604         I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1605
1606         /* Check PLL is locked */
1607         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1608                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1609
1610         /* not sure when this should be written */
1611         I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1612         POSTING_READ(DPLL_MD(pipe));
1613
1614         mutex_unlock(&dev_priv->dpio_lock);
1615 }
1616
1617 static void i9xx_enable_pll(struct intel_crtc *crtc)
1618 {
1619         struct drm_device *dev = crtc->base.dev;
1620         struct drm_i915_private *dev_priv = dev->dev_private;
1621         int reg = DPLL(crtc->pipe);
1622         u32 dpll = crtc->config.dpll_hw_state.dpll;
1623
1624         assert_pipe_disabled(dev_priv, crtc->pipe);
1625
1626         /* No really, not for ILK+ */
1627         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1628
1629         /* PLL is protected by panel, make sure we can write it */
1630         if (IS_MOBILE(dev) && !IS_I830(dev))
1631                 assert_panel_unlocked(dev_priv, crtc->pipe);
1632
1633         I915_WRITE(reg, dpll);
1634
1635         /* Wait for the clocks to stabilize. */
1636         POSTING_READ(reg);
1637         udelay(150);
1638
1639         if (INTEL_INFO(dev)->gen >= 4) {
1640                 I915_WRITE(DPLL_MD(crtc->pipe),
1641                            crtc->config.dpll_hw_state.dpll_md);
1642         } else {
1643                 /* The pixel multiplier can only be updated once the
1644                  * DPLL is enabled and the clocks are stable.
1645                  *
1646                  * So write it again.
1647                  */
1648                 I915_WRITE(reg, dpll);
1649         }
1650
1651         /* We do this three times for luck */
1652         I915_WRITE(reg, dpll);
1653         POSTING_READ(reg);
1654         udelay(150); /* wait for warmup */
1655         I915_WRITE(reg, dpll);
1656         POSTING_READ(reg);
1657         udelay(150); /* wait for warmup */
1658         I915_WRITE(reg, dpll);
1659         POSTING_READ(reg);
1660         udelay(150); /* wait for warmup */
1661 }
1662
1663 /**
1664  * i9xx_disable_pll - disable a PLL
1665  * @dev_priv: i915 private structure
1666  * @pipe: pipe PLL to disable
1667  *
1668  * Disable the PLL for @pipe, making sure the pipe is off first.
1669  *
1670  * Note!  This is for pre-ILK only.
1671  */
1672 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1673 {
1674         /* Don't disable pipe A or its PLL if the pipe A force quirk is set */
1675         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1676                 return;
1677
1678         /* Make sure the pipe isn't still relying on us */
1679         assert_pipe_disabled(dev_priv, pipe);
1680
1681         I915_WRITE(DPLL(pipe), 0);
1682         POSTING_READ(DPLL(pipe));
1683 }
1684
1685 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1686 {
1687         u32 val = 0;
1688
1689         /* Make sure the pipe isn't still relying on us */
1690         assert_pipe_disabled(dev_priv, pipe);
1691
1692         /*
1693          * Leave integrated clock source and reference clock enabled for pipe B.
1694          * The latter is needed for VGA hotplug / manual detection.
1695          */
1696         if (pipe == PIPE_B)
1697                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1698         I915_WRITE(DPLL(pipe), val);
1699         POSTING_READ(DPLL(pipe));
1700
1701 }
1702
1703 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1704 {
1705         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1706         u32 val;
1707
1708         /* Make sure the pipe isn't still relying on us */
1709         assert_pipe_disabled(dev_priv, pipe);
1710
1711         /* Set PLL en = 0 */
1712         val = DPLL_SSC_REF_CLOCK_CHV;
1713         if (pipe != PIPE_A)
1714                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1715         I915_WRITE(DPLL(pipe), val);
1716         POSTING_READ(DPLL(pipe));
1717
1718         mutex_lock(&dev_priv->dpio_lock);
1719
1720         /* Disable 10bit clock to display controller */
1721         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1722         val &= ~DPIO_DCLKP_EN;
1723         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1724
1725         /* disable left/right clock distribution */
1726         if (pipe != PIPE_B) {
1727                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1728                 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1729                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1730         } else {
1731                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1732                 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1733                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1734         }
1735
1736         mutex_unlock(&dev_priv->dpio_lock);
1737 }
1738
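     /*
      * Wait for the PHY/DPLL status bits for @dport to signal the port is
      * ready (the bits under the port's ready mask must clear).  Ports B and
      * C are checked in DPLL(0), port D in DPIO_PHY_STATUS; warn with the
      * register value on timeout.
      */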
1739 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1740                 struct intel_digital_port *dport)
1741 {
1742         u32 port_mask;
1743         int dpll_reg;
1744
1745         switch (dport->port) {
1746         case PORT_B:
1747                 port_mask = DPLL_PORTB_READY_MASK;
1748                 dpll_reg = DPLL(0);
1749                 break;
1750         case PORT_C:
1751                 port_mask = DPLL_PORTC_READY_MASK;
1752                 dpll_reg = DPLL(0);
1753                 break;
1754         case PORT_D:
1755                 port_mask = DPLL_PORTD_READY_MASK;
1756                 dpll_reg = DPIO_PHY_STATUS;
1757                 break;
1758         default:
1759                 BUG();
1760         }
1761
1762         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1763                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1764                      port_name(dport->port), I915_READ(dpll_reg));
1765 }
1766
1767 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1768 {
1769         struct drm_device *dev = crtc->base.dev;
1770         struct drm_i915_private *dev_priv = dev->dev_private;
1771         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1772
1773         if (WARN_ON(pll == NULL))
1774                 return;
1775
1776         WARN_ON(!pll->refcount);
1777         if (pll->active == 0) {
1778                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1779                 WARN_ON(pll->on);
1780                 assert_shared_dpll_disabled(dev_priv, pll);
1781
1782                 pll->mode_set(dev_priv, pll);
1783         }
1784 }
1785
1786 /**
1787  * intel_enable_shared_dpll - enable a CRTC's shared DPLL (PCH PLL)
1788  * @crtc: CRTC whose shared DPLL should be enabled
1789  *
1790  * The shared DPLL needs to be enabled before the PCH transcoder, since it
1791  * drives the transcoder clock.  The DPLL is reference counted, so the
1792  * hardware is only touched when the first active user enables it.
1793  */
1794 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1795 {
1796         struct drm_device *dev = crtc->base.dev;
1797         struct drm_i915_private *dev_priv = dev->dev_private;
1798         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1799
1800         if (WARN_ON(pll == NULL))
1801                 return;
1802
1803         if (WARN_ON(pll->refcount == 0))
1804                 return;
1805
1806         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1807                       pll->name, pll->active, pll->on,
1808                       crtc->base.base.id);
1809
1810         if (pll->active++) {
1811                 WARN_ON(!pll->on);
1812                 assert_shared_dpll_enabled(dev_priv, pll);
1813                 return;
1814         }
1815         WARN_ON(pll->on);
1816
1817         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1818         pll->enable(dev_priv, pll);
1819         pll->on = true;
1820 }
1821
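     /*
      * Drop one active reference on the CRTC's shared DPLL; the hardware is
      * only turned off once the last active user is gone.  Counterpart to
      * intel_enable_shared_dpll() above.
      */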
1822 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1823 {
1824         struct drm_device *dev = crtc->base.dev;
1825         struct drm_i915_private *dev_priv = dev->dev_private;
1826         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1827
1828         /* PCH only available on ILK+ */
1829         BUG_ON(INTEL_INFO(dev)->gen < 5);
1830         if (WARN_ON(pll == NULL))
1831                 return;
1832
1833         if (WARN_ON(pll->refcount == 0))
1834                 return;
1835
1836         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1837                       pll->name, pll->active, pll->on,
1838                       crtc->base.base.id);
1839
1840         if (WARN_ON(pll->active == 0)) {
1841                 assert_shared_dpll_disabled(dev_priv, pll);
1842                 return;
1843         }
1844
1845         assert_shared_dpll_enabled(dev_priv, pll);
1846         WARN_ON(!pll->on);
1847         if (--pll->active)
1848                 return;
1849
1850         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1851         pll->disable(dev_priv, pll);
1852         pll->on = false;
1853 }
1854
1855 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1856                                            enum pipe pipe)
1857 {
1858         struct drm_device *dev = dev_priv->dev;
1859         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1860         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1861         uint32_t reg, val, pipeconf_val;
1862
1863         /* PCH only available on ILK+ */
1864         BUG_ON(INTEL_INFO(dev)->gen < 5);
1865
1866         /* Make sure PCH DPLL is enabled */
1867         assert_shared_dpll_enabled(dev_priv,
1868                                    intel_crtc_to_shared_dpll(intel_crtc));
1869
1870         /* FDI must be feeding us bits for PCH ports */
1871         assert_fdi_tx_enabled(dev_priv, pipe);
1872         assert_fdi_rx_enabled(dev_priv, pipe);
1873
1874         if (HAS_PCH_CPT(dev)) {
1875                 /* Workaround: Set the timing override bit before enabling the
1876                  * pch transcoder. */
1877                 reg = TRANS_CHICKEN2(pipe);
1878                 val = I915_READ(reg);
1879                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1880                 I915_WRITE(reg, val);
1881         }
1882
1883         reg = PCH_TRANSCONF(pipe);
1884         val = I915_READ(reg);
1885         pipeconf_val = I915_READ(PIPECONF(pipe));
1886
1887         if (HAS_PCH_IBX(dev_priv->dev)) {
1888                 /*
1889                  * Make the BPC in the transcoder consistent with
1890                  * that in the pipeconf reg.
1891                  */
1892                 val &= ~PIPECONF_BPC_MASK;
1893                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1894         }
1895
1896         val &= ~TRANS_INTERLACE_MASK;
1897         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1898                 if (HAS_PCH_IBX(dev_priv->dev) &&
1899                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1900                         val |= TRANS_LEGACY_INTERLACED_ILK;
1901                 else
1902                         val |= TRANS_INTERLACED;
1903         else
1904                 val |= TRANS_PROGRESSIVE;
1905
1906         I915_WRITE(reg, val | TRANS_ENABLE);
1907         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1908                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1909 }
1910
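     /*
      * LPT has a single PCH transcoder, fed from FDI RX on transcoder A, so
      * unlike the ironlake path this takes the CPU transcoder rather than a
      * pipe and always programs LPT_TRANSCONF.
      */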
1911 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1912                                       enum transcoder cpu_transcoder)
1913 {
1914         u32 val, pipeconf_val;
1915
1916         /* PCH only available on ILK+ */
1917         BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
1918
1919         /* FDI must be feeding us bits for PCH ports */
1920         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1921         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1922
1923         /* Workaround: set timing override bit. */
1924         val = I915_READ(_TRANSA_CHICKEN2);
1925         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1926         I915_WRITE(_TRANSA_CHICKEN2, val);
1927
1928         val = TRANS_ENABLE;
1929         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1930
1931         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1932             PIPECONF_INTERLACED_ILK)
1933                 val |= TRANS_INTERLACED;
1934         else
1935                 val |= TRANS_PROGRESSIVE;
1936
1937         I915_WRITE(LPT_TRANSCONF, val);
1938         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1939                 DRM_ERROR("Failed to enable PCH transcoder\n");
1940 }
1941
1942 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1943                                             enum pipe pipe)
1944 {
1945         struct drm_device *dev = dev_priv->dev;
1946         uint32_t reg, val;
1947
1948         /* FDI relies on the transcoder */
1949         assert_fdi_tx_disabled(dev_priv, pipe);
1950         assert_fdi_rx_disabled(dev_priv, pipe);
1951
1952         /* Ports must be off as well */
1953         assert_pch_ports_disabled(dev_priv, pipe);
1954
1955         reg = PCH_TRANSCONF(pipe);
1956         val = I915_READ(reg);
1957         val &= ~TRANS_ENABLE;
1958         I915_WRITE(reg, val);
1959         /* wait for PCH transcoder off, transcoder state */
1960         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1961                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1962
1963         if (!HAS_PCH_IBX(dev)) {
1964                 /* Workaround: Clear the timing override chicken bit again. */
1965                 reg = TRANS_CHICKEN2(pipe);
1966                 val = I915_READ(reg);
1967                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1968                 I915_WRITE(reg, val);
1969         }
1970 }
1971
1972 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1973 {
1974         u32 val;
1975
1976         val = I915_READ(LPT_TRANSCONF);
1977         val &= ~TRANS_ENABLE;
1978         I915_WRITE(LPT_TRANSCONF, val);
1979         /* wait for PCH transcoder off, transcoder state */
1980         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1981                 DRM_ERROR("Failed to disable PCH transcoder\n");
1982
1983         /* Workaround: clear timing override bit. */
1984         val = I915_READ(_TRANSA_CHICKEN2);
1985         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1986         I915_WRITE(_TRANSA_CHICKEN2, val);
1987 }
1988
1989 /**
1990  * intel_enable_pipe - enable a pipe, asserting requirements
1991  * @crtc: crtc responsible for the pipe
1992  *
1993  * Enable @crtc's pipe, making sure that various hardware specific requirements
1994  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1995  */
1996 static void intel_enable_pipe(struct intel_crtc *crtc)
1997 {
1998         struct drm_device *dev = crtc->base.dev;
1999         struct drm_i915_private *dev_priv = dev->dev_private;
2000         enum pipe pipe = crtc->pipe;
2001         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2002                                                                       pipe);
2003         enum pipe pch_transcoder;
2004         int reg;
2005         u32 val;
2006
2007         assert_planes_disabled(dev_priv, pipe);
2008         assert_cursor_disabled(dev_priv, pipe);
2009         assert_sprites_disabled(dev_priv, pipe);
2010
2011         if (HAS_PCH_LPT(dev_priv->dev))
2012                 pch_transcoder = TRANSCODER_A;
2013         else
2014                 pch_transcoder = pipe;
2015
2016         /*
2017          * A pipe without a PLL won't actually be able to drive bits from
2018          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2019          * need the check.
2020          */
2021         if (!HAS_PCH_SPLIT(dev_priv->dev))
2022                 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
2023                         assert_dsi_pll_enabled(dev_priv);
2024                 else
2025                         assert_pll_enabled(dev_priv, pipe);
2026         else {
2027                 if (crtc->config.has_pch_encoder) {
2028                         /* if driving the PCH, we need FDI enabled */
2029                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2030                         assert_fdi_tx_pll_enabled(dev_priv,
2031                                                   (enum pipe) cpu_transcoder);
2032                 }
2033                 /* FIXME: assert CPU port conditions for SNB+ */
2034         }
2035
2036         reg = PIPECONF(cpu_transcoder);
2037         val = I915_READ(reg);
2038         if (val & PIPECONF_ENABLE) {
2039                 WARN_ON(!(pipe == PIPE_A &&
2040                           dev_priv->quirks & QUIRK_PIPEA_FORCE));
2041                 return;
2042         }
2043
2044         I915_WRITE(reg, val | PIPECONF_ENABLE);
2045         POSTING_READ(reg);
2046 }
2047
2048 /**
2049  * intel_disable_pipe - disable a pipe, asserting requirements
2050  * @dev_priv: i915 private structure
2051  * @pipe: pipe to disable
2052  *
2053  * Disable @pipe, making sure that various hardware specific requirements
2054  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
2055  *
2056  * @pipe should be %PIPE_A or %PIPE_B.
2057  *
2058  * Will wait until the pipe has shut down before returning.
2059  */
2060 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2061                                enum pipe pipe)
2062 {
2063         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2064                                                                       pipe);
2065         int reg;
2066         u32 val;
2067
2068         /*
2069          * Make sure planes won't keep trying to pump pixels to us,
2070          * or we might hang the display.
2071          */
2072         assert_planes_disabled(dev_priv, pipe);
2073         assert_cursor_disabled(dev_priv, pipe);
2074         assert_sprites_disabled(dev_priv, pipe);
2075
2076         /* Don't disable pipe A or its PLL if the pipe A force quirk is set */
2077         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2078                 return;
2079
2080         reg = PIPECONF(cpu_transcoder);
2081         val = I915_READ(reg);
2082         if ((val & PIPECONF_ENABLE) == 0)
2083                 return;
2084
2085         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
2086         intel_wait_for_pipe_off(dev_priv->dev, pipe);
2087 }
2088
2089 /*
2090  * Plane regs are double buffered, going from enabled->disabled needs a
2091  * trigger in order to latch.  The display address reg provides this.
2092  */
2093 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2094                                enum plane plane)
2095 {
2096         struct drm_device *dev = dev_priv->dev;
2097         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2098
2099         I915_WRITE(reg, I915_READ(reg));
2100         POSTING_READ(reg);
2101 }
2102
2103 /**
2104  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2105  * @dev_priv: i915 private structure
2106  * @plane: plane to enable
2107  * @pipe: pipe being fed
2108  *
2109  * Enable @plane on @pipe, making sure that @pipe is running first.
2110  */
2111 static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2112                                           enum plane plane, enum pipe pipe)
2113 {
2114         struct drm_device *dev = dev_priv->dev;
2115         struct intel_crtc *intel_crtc =
2116                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2117         int reg;
2118         u32 val;
2119
2120         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2121         assert_pipe_enabled(dev_priv, pipe);
2122
2123         if (intel_crtc->primary_enabled)
2124                 return;
2125
2126         intel_crtc->primary_enabled = true;
2127
2128         reg = DSPCNTR(plane);
2129         val = I915_READ(reg);
2130         WARN_ON(val & DISPLAY_PLANE_ENABLE);
2131
2132         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2133         intel_flush_primary_plane(dev_priv, plane);
2134
2135         /*
2136          * BDW signals flip done immediately if the plane
2137          * is disabled, even if the plane enable is already
2138          * armed to occur at the next vblank :(
2139          */
2140         if (IS_BROADWELL(dev))
2141                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2142 }
2143
2144 /**
2145  * intel_disable_primary_hw_plane - disable the primary hardware plane
2146  * @dev_priv: i915 private structure
2147  * @plane: plane to disable
2148  * @pipe: pipe consuming the data
2149  *
2150  * Disable @plane; should be an independent operation.
2151  */
2152 static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
2153                                            enum plane plane, enum pipe pipe)
2154 {
2155         struct intel_crtc *intel_crtc =
2156                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2157         int reg;
2158         u32 val;
2159
2160         if (!intel_crtc->primary_enabled)
2161                 return;
2162
2163         intel_crtc->primary_enabled = false;
2164
2165         reg = DSPCNTR(plane);
2166         val = I915_READ(reg);
2167         WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2168
2169         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
2170         intel_flush_primary_plane(dev_priv, plane);
2171 }
2172
2173 static bool need_vtd_wa(struct drm_device *dev)
2174 {
2175 #ifdef CONFIG_INTEL_IOMMU
2176         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2177                 return true;
2178 #endif
2179         return false;
2180 }
2181
2182 static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2183 {
2184         int tile_height;
2185
2186         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2187         return ALIGN(height, tile_height);
2188 }
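     /*
      * Illustrative example (values chosen for the example, not taken from
      * real hardware state): a tiled 1090-line framebuffer is padded to 1096
      * lines on gen3+ (8-line tiles) and to 1104 lines on gen2 (16-line
      * tiles), while an untiled framebuffer keeps its 1090 lines.
      */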
2189
2190 int
2191 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2192                            struct drm_i915_gem_object *obj,
2193                            struct intel_engine_cs *pipelined)
2194 {
2195         struct drm_i915_private *dev_priv = dev->dev_private;
2196         u32 alignment;
2197         int ret;
2198
2199         switch (obj->tiling_mode) {
2200         case I915_TILING_NONE:
2201                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2202                         alignment = 128 * 1024;
2203                 else if (INTEL_INFO(dev)->gen >= 4)
2204                         alignment = 4 * 1024;
2205                 else
2206                         alignment = 64 * 1024;
2207                 break;
2208         case I915_TILING_X:
2209                 /* pin() will align the object as required by fence */
2210                 alignment = 0;
2211                 break;
2212         case I915_TILING_Y:
2213                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2214                 return -EINVAL;
2215         default:
2216                 BUG();
2217         }
2218
2219         /* Note that the w/a also requires 64 PTE of padding following the
2220          * bo. We currently fill all unused PTE with the shadow page and so
2221          * we should always have valid PTE following the scanout preventing
2222          * the VT-d warning.
2223          */
2224         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2225                 alignment = 256 * 1024;
2226
2227         dev_priv->mm.interruptible = false;
2228         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2229         if (ret)
2230                 goto err_interruptible;
2231
2232         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2233          * fence, whereas 965+ only requires a fence if using
2234          * framebuffer compression.  For simplicity, we always install
2235          * a fence as the cost is not that onerous.
2236          */
2237         ret = i915_gem_object_get_fence(obj);
2238         if (ret)
2239                 goto err_unpin;
2240
2241         i915_gem_object_pin_fence(obj);
2242
2243         dev_priv->mm.interruptible = true;
2244         return 0;
2245
2246 err_unpin:
2247         i915_gem_object_unpin_from_display_plane(obj);
2248 err_interruptible:
2249         dev_priv->mm.interruptible = true;
2250         return ret;
2251 }
2252
2253 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2254 {
2255         i915_gem_object_unpin_fence(obj);
2256         i915_gem_object_unpin_from_display_plane(obj);
2257 }
2258
2259 /* Computes the linear offset to the base tile and adjusts x, y.  Bytes per pixel
2260  * is assumed to be a power-of-two. */
2261 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2262                                              unsigned int tiling_mode,
2263                                              unsigned int cpp,
2264                                              unsigned int pitch)
2265 {
2266         if (tiling_mode != I915_TILING_NONE) {
2267                 unsigned int tile_rows, tiles;
2268
2269                 tile_rows = *y / 8;
2270                 *y %= 8;
2271
2272                 tiles = *x / (512/cpp);
2273                 *x %= 512/cpp;
2274
2275                 return tile_rows * pitch * 8 + tiles * 4096;
2276         } else {
2277                 unsigned int offset;
2278
2279                 offset = *y * pitch + *x * cpp;
2280                 *y = 0;
2281                 *x = (offset & 4095) / cpp;
2282                 return offset & -4096;
2283         }
2284 }
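     /*
      * Illustrative example for the tiled path above (values chosen for the
      * example): with cpp = 4, pitch = 8192 and (x, y) = (700, 43), a tile
      * spans 512 bytes by 8 rows, so tile_rows = 43 / 8 = 5 (y becomes 3)
      * and tiles = 700 / 128 = 5 (x becomes 60), giving an offset of
      * 5 * 8192 * 8 + 5 * 4096 = 348160.  In the linear case the offset is
      * simply rounded down to a 4K boundary and the sub-4K remainder is
      * folded back into x, with y set to 0.
      */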
2285
2286 int intel_format_to_fourcc(int format)
2287 {
2288         switch (format) {
2289         case DISPPLANE_8BPP:
2290                 return DRM_FORMAT_C8;
2291         case DISPPLANE_BGRX555:
2292                 return DRM_FORMAT_XRGB1555;
2293         case DISPPLANE_BGRX565:
2294                 return DRM_FORMAT_RGB565;
2295         default:
2296         case DISPPLANE_BGRX888:
2297                 return DRM_FORMAT_XRGB8888;
2298         case DISPPLANE_RGBX888:
2299                 return DRM_FORMAT_XBGR8888;
2300         case DISPPLANE_BGRX101010:
2301                 return DRM_FORMAT_XRGB2101010;
2302         case DISPPLANE_RGBX101010:
2303                 return DRM_FORMAT_XBGR2101010;
2304         }
2305 }
2306
2307 static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2308                                   struct intel_plane_config *plane_config)
2309 {
2310         struct drm_device *dev = crtc->base.dev;
2311         struct drm_i915_gem_object *obj = NULL;
2312         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2313         u32 base = plane_config->base;
2314
2315         if (plane_config->size == 0)
2316                 return false;
2317
2318         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2319                                                              plane_config->size);
2320         if (!obj)
2321                 return false;
2322
2323         if (plane_config->tiled) {
2324                 obj->tiling_mode = I915_TILING_X;
2325                 obj->stride = crtc->base.primary->fb->pitches[0];
2326         }
2327
2328         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2329         mode_cmd.width = crtc->base.primary->fb->width;
2330         mode_cmd.height = crtc->base.primary->fb->height;
2331         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2332
2333         mutex_lock(&dev->struct_mutex);
2334
2335         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2336                                    &mode_cmd, obj)) {
2337                 DRM_DEBUG_KMS("intel fb init failed\n");
2338                 goto out_unref_obj;
2339         }
2340
2341         obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2342         mutex_unlock(&dev->struct_mutex);
2343
2344         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2345         return true;
2346
2347 out_unref_obj:
2348         drm_gem_object_unreference(&obj->base);
2349         mutex_unlock(&dev->struct_mutex);
2350         return false;
2351 }
2352
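     /*
      * Try to take over the pre-allocated (e.g. BIOS-programmed) framebuffer
      * described by @plane_config: wrap it in a stolen-memory GEM object via
      * intel_alloc_plane_obj(); if that fails, check whether another active
      * CRTC already scans out the same base address so its fb can be shared,
      * and otherwise leave the primary plane without an fb.
      */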
2353 static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2354                                  struct intel_plane_config *plane_config)
2355 {
2356         struct drm_device *dev = intel_crtc->base.dev;
2357         struct drm_crtc *c;
2358         struct intel_crtc *i;
2359         struct intel_framebuffer *fb;
2360
2361         if (!intel_crtc->base.primary->fb)
2362                 return;
2363
2364         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2365                 return;
2366
2367         kfree(intel_crtc->base.primary->fb);
2368         intel_crtc->base.primary->fb = NULL;
2369
2370         /*
2371          * Failed to alloc the obj, check to see if we should share
2372          * an fb with another CRTC instead
2373          */
2374         for_each_crtc(dev, c) {
2375                 i = to_intel_crtc(c);
2376
2377                 if (c == &intel_crtc->base)
2378                         continue;
2379
2380                 if (!i->active || !c->primary->fb)
2381                         continue;
2382
2383                 fb = to_intel_framebuffer(c->primary->fb);
2384                 if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
2385                         drm_framebuffer_reference(c->primary->fb);
2386                         intel_crtc->base.primary->fb = c->primary->fb;
2387                         fb->obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2388                         break;
2389                 }
2390         }
2391 }
2392
2393 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2394                                       struct drm_framebuffer *fb,
2395                                       int x, int y)
2396 {
2397         struct drm_device *dev = crtc->dev;
2398         struct drm_i915_private *dev_priv = dev->dev_private;
2399         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2400         struct intel_framebuffer *intel_fb;
2401         struct drm_i915_gem_object *obj;
2402         int plane = intel_crtc->plane;
2403         unsigned long linear_offset;
2404         u32 dspcntr;
2405         u32 reg;
2406
2407         intel_fb = to_intel_framebuffer(fb);
2408         obj = intel_fb->obj;
2409
2410         reg = DSPCNTR(plane);
2411         dspcntr = I915_READ(reg);
2412         /* Mask out pixel format bits in case we change it */
2413         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2414         switch (fb->pixel_format) {
2415         case DRM_FORMAT_C8:
2416                 dspcntr |= DISPPLANE_8BPP;
2417                 break;
2418         case DRM_FORMAT_XRGB1555:
2419         case DRM_FORMAT_ARGB1555:
2420                 dspcntr |= DISPPLANE_BGRX555;
2421                 break;
2422         case DRM_FORMAT_RGB565:
2423                 dspcntr |= DISPPLANE_BGRX565;
2424                 break;
2425         case DRM_FORMAT_XRGB8888:
2426         case DRM_FORMAT_ARGB8888:
2427                 dspcntr |= DISPPLANE_BGRX888;
2428                 break;
2429         case DRM_FORMAT_XBGR8888:
2430         case DRM_FORMAT_ABGR8888:
2431                 dspcntr |= DISPPLANE_RGBX888;
2432                 break;
2433         case DRM_FORMAT_XRGB2101010:
2434         case DRM_FORMAT_ARGB2101010:
2435                 dspcntr |= DISPPLANE_BGRX101010;
2436                 break;
2437         case DRM_FORMAT_XBGR2101010:
2438         case DRM_FORMAT_ABGR2101010:
2439                 dspcntr |= DISPPLANE_RGBX101010;
2440                 break;
2441         default:
2442                 BUG();
2443         }
2444
2445         if (INTEL_INFO(dev)->gen >= 4) {
2446                 if (obj->tiling_mode != I915_TILING_NONE)
2447                         dspcntr |= DISPPLANE_TILED;
2448                 else
2449                         dspcntr &= ~DISPPLANE_TILED;
2450         }
2451
2452         if (IS_G4X(dev))
2453                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2454
2455         I915_WRITE(reg, dspcntr);
2456
2457         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2458
2459         if (INTEL_INFO(dev)->gen >= 4) {
2460                 intel_crtc->dspaddr_offset =
2461                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2462                                                        fb->bits_per_pixel / 8,
2463                                                        fb->pitches[0]);
2464                 linear_offset -= intel_crtc->dspaddr_offset;
2465         } else {
2466                 intel_crtc->dspaddr_offset = linear_offset;
2467         }
2468
2469         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2470                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2471                       fb->pitches[0]);
2472         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2473         if (INTEL_INFO(dev)->gen >= 4) {
2474                 I915_WRITE(DSPSURF(plane),
2475                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2476                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2477                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2478         } else
2479                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2480         POSTING_READ(reg);
2481 }
2482
2483 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2484                                           struct drm_framebuffer *fb,
2485                                           int x, int y)
2486 {
2487         struct drm_device *dev = crtc->dev;
2488         struct drm_i915_private *dev_priv = dev->dev_private;
2489         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2490         struct intel_framebuffer *intel_fb;
2491         struct drm_i915_gem_object *obj;
2492         int plane = intel_crtc->plane;
2493         unsigned long linear_offset;
2494         u32 dspcntr;
2495         u32 reg;
2496
2497         intel_fb = to_intel_framebuffer(fb);
2498         obj = intel_fb->obj;
2499
2500         reg = DSPCNTR(plane);
2501         dspcntr = I915_READ(reg);
2502         /* Mask out pixel format bits in case we change it */
2503         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2504         switch (fb->pixel_format) {
2505         case DRM_FORMAT_C8:
2506                 dspcntr |= DISPPLANE_8BPP;
2507                 break;
2508         case DRM_FORMAT_RGB565:
2509                 dspcntr |= DISPPLANE_BGRX565;
2510                 break;
2511         case DRM_FORMAT_XRGB8888:
2512         case DRM_FORMAT_ARGB8888:
2513                 dspcntr |= DISPPLANE_BGRX888;
2514                 break;
2515         case DRM_FORMAT_XBGR8888:
2516         case DRM_FORMAT_ABGR8888:
2517                 dspcntr |= DISPPLANE_RGBX888;
2518                 break;
2519         case DRM_FORMAT_XRGB2101010:
2520         case DRM_FORMAT_ARGB2101010:
2521                 dspcntr |= DISPPLANE_BGRX101010;
2522                 break;
2523         case DRM_FORMAT_XBGR2101010:
2524         case DRM_FORMAT_ABGR2101010:
2525                 dspcntr |= DISPPLANE_RGBX101010;
2526                 break;
2527         default:
2528                 BUG();
2529         }
2530
2531         if (obj->tiling_mode != I915_TILING_NONE)
2532                 dspcntr |= DISPPLANE_TILED;
2533         else
2534                 dspcntr &= ~DISPPLANE_TILED;
2535
2536         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2537                 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2538         else
2539                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2540
2541         I915_WRITE(reg, dspcntr);
2542
2543         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2544         intel_crtc->dspaddr_offset =
2545                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2546                                                fb->bits_per_pixel / 8,
2547                                                fb->pitches[0]);
2548         linear_offset -= intel_crtc->dspaddr_offset;
2549
2550         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2551                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2552                       fb->pitches[0]);
2553         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2554         I915_WRITE(DSPSURF(plane),
2555                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2556         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2557                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2558         } else {
2559                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2560                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2561         }
2562         POSTING_READ(reg);
2563 }
2564
2565 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2566 static int
2567 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2568                            int x, int y, enum mode_set_atomic state)
2569 {
2570         struct drm_device *dev = crtc->dev;
2571         struct drm_i915_private *dev_priv = dev->dev_private;
2572
2573         if (dev_priv->display.disable_fbc)
2574                 dev_priv->display.disable_fbc(dev);
2575         intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
2576
2577         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2578
2579         return 0;
2580 }
2581
2582 void intel_display_handle_reset(struct drm_device *dev)
2583 {
2584         struct drm_i915_private *dev_priv = dev->dev_private;
2585         struct drm_crtc *crtc;
2586
2587         /*
2588          * Flips in the rings have been nuked by the reset,
2589          * so complete all pending flips so that user space
2590          * will get its events and not get stuck.
2591          *
2592          * Also update the base address of all primary
2593  * planes to the last fb to make sure we're
2594          * showing the correct fb after a reset.
2595          *
2596          * Need to make two loops over the crtcs so that we
2597          * don't try to grab a crtc mutex before the
2598          * pending_flip_queue really got woken up.
2599          */
2600
2601         for_each_crtc(dev, crtc) {
2602                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2603                 enum plane plane = intel_crtc->plane;
2604
2605                 intel_prepare_page_flip(dev, plane);
2606                 intel_finish_page_flip_plane(dev, plane);
2607         }
2608
2609         for_each_crtc(dev, crtc) {
2610                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2611
2612                 drm_modeset_lock(&crtc->mutex, NULL);
2613                 /*
2614                  * FIXME: Once we have proper support for primary planes (and
2615                  * disabling them without disabling the entire crtc) allow again
2616                  * a NULL crtc->primary->fb.
2617                  */
2618                 if (intel_crtc->active && crtc->primary->fb)
2619                         dev_priv->display.update_primary_plane(crtc,
2620                                                                crtc->primary->fb,
2621                                                                crtc->x,
2622                                                                crtc->y);
2623                 drm_modeset_unlock(&crtc->mutex);
2624         }
2625 }
2626
2627 static int
2628 intel_finish_fb(struct drm_framebuffer *old_fb)
2629 {
2630         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2631         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2632         bool was_interruptible = dev_priv->mm.interruptible;
2633         int ret;
2634
2635         /* Big Hammer, we also need to ensure that any pending
2636          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2637          * current scanout is retired before unpinning the old
2638          * framebuffer.
2639          *
2640          * This should only fail upon a hung GPU, in which case we
2641          * can safely continue.
2642          */
2643         dev_priv->mm.interruptible = false;
2644         ret = i915_gem_object_finish_gpu(obj);
2645         dev_priv->mm.interruptible = was_interruptible;
2646
2647         return ret;
2648 }
2649
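     /*
      * Report whether a page flip is still outstanding on @crtc.  If a GPU
      * reset is in progress, or one has happened since the flip was queued,
      * the reset handling completes the flip instead, so it is not reported
      * as pending here.
      */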
2650 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2651 {
2652         struct drm_device *dev = crtc->dev;
2653         struct drm_i915_private *dev_priv = dev->dev_private;
2654         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2655         unsigned long flags;
2656         bool pending;
2657
2658         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2659             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2660                 return false;
2661
2662         spin_lock_irqsave(&dev->event_lock, flags);
2663         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2664         spin_unlock_irqrestore(&dev->event_lock, flags);
2665
2666         return pending;
2667 }
2668
2669 static int
2670 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2671                     struct drm_framebuffer *fb)
2672 {
2673         struct drm_device *dev = crtc->dev;
2674         struct drm_i915_private *dev_priv = dev->dev_private;
2675         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2676         enum pipe pipe = intel_crtc->pipe;
2677         struct drm_framebuffer *old_fb;
2678         struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
2679         struct drm_i915_gem_object *old_obj;
2680         int ret;
2681
2682         if (intel_crtc_has_pending_flip(crtc)) {
2683                 DRM_ERROR("pipe is still busy with an old pageflip\n");
2684                 return -EBUSY;
2685         }
2686
2687         /* no fb bound */
2688         if (!fb) {
2689                 DRM_ERROR("No FB bound\n");
2690                 return 0;
2691         }
2692
2693         if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2694                 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2695                           plane_name(intel_crtc->plane),
2696                           INTEL_INFO(dev)->num_pipes);
2697                 return -EINVAL;
2698         }
2699
2700         old_fb = crtc->primary->fb;
2701         old_obj = old_fb ? to_intel_framebuffer(old_fb)->obj : NULL;
2702
2703         mutex_lock(&dev->struct_mutex);
2704         ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2705         if (ret == 0)
2706                 i915_gem_track_fb(old_obj, obj,
2707                                   INTEL_FRONTBUFFER_PRIMARY(pipe));
2708         mutex_unlock(&dev->struct_mutex);
2709         if (ret != 0) {
2710                 DRM_ERROR("pin & fence failed\n");
2711                 return ret;
2712         }
2713
2714         /*
2715          * Update pipe size and adjust fitter if needed: the reason for this is
2716          * that in compute_mode_changes we check the native mode (not the pfit
2717          * mode) to see if we can flip rather than do a full mode set. In the
2718          * fastboot case, we'll flip, but if we don't update the pipesrc and
2719          * pfit state, we'll end up with a big fb scanned out into the wrong
2720          * sized surface.
2721          *
2722          * To fix this properly, we need to hoist the checks up into
2723          * compute_mode_changes (or above), check the actual pfit state and
2724          * whether the platform allows pfit disable with pipe active, and only
2725          * then update the pipesrc and pfit state, even on the flip path.
2726          */
2727         if (i915.fastboot) {
2728                 const struct drm_display_mode *adjusted_mode =
2729                         &intel_crtc->config.adjusted_mode;
2730
2731                 I915_WRITE(PIPESRC(intel_crtc->pipe),
2732                            ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2733                            (adjusted_mode->crtc_vdisplay - 1));
2734                 if (!intel_crtc->config.pch_pfit.enabled &&
2735                     (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2736                      intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2737                         I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2738                         I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2739                         I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2740                 }
2741                 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2742                 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2743         }
2744
2745         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2746
2747         if (intel_crtc->active)
2748                 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2749
2750         crtc->primary->fb = fb;
2751         crtc->x = x;
2752         crtc->y = y;
2753
2754         if (old_fb) {
2755                 if (intel_crtc->active && old_fb != fb)
2756                         intel_wait_for_vblank(dev, intel_crtc->pipe);
2757                 mutex_lock(&dev->struct_mutex);
2758                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2759                 mutex_unlock(&dev->struct_mutex);
2760         }
2761
2762         mutex_lock(&dev->struct_mutex);
2763         intel_update_fbc(dev);
2764         mutex_unlock(&dev->struct_mutex);
2765
2766         return 0;
2767 }
2768
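     /*
      * Switch the FDI link from a training pattern to the normal/idle pattern
      * once link training has completed.  IVB and CPT use different bitfields
      * for the pattern select, and IVB additionally wants error correction
      * enabled.
      */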
2769 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2770 {
2771         struct drm_device *dev = crtc->dev;
2772         struct drm_i915_private *dev_priv = dev->dev_private;
2773         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2774         int pipe = intel_crtc->pipe;
2775         u32 reg, temp;
2776
2777         /* enable normal train */
2778         reg = FDI_TX_CTL(pipe);
2779         temp = I915_READ(reg);
2780         if (IS_IVYBRIDGE(dev)) {
2781                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2782                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2783         } else {
2784                 temp &= ~FDI_LINK_TRAIN_NONE;
2785                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2786         }
2787         I915_WRITE(reg, temp);
2788
2789         reg = FDI_RX_CTL(pipe);
2790         temp = I915_READ(reg);
2791         if (HAS_PCH_CPT(dev)) {
2792                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2793                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2794         } else {
2795                 temp &= ~FDI_LINK_TRAIN_NONE;
2796                 temp |= FDI_LINK_TRAIN_NONE;
2797         }
2798         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2799
2800         /* wait one idle pattern time */
2801         POSTING_READ(reg);
2802         udelay(1000);
2803
2804         /* IVB wants error correction enabled */
2805         if (IS_IVYBRIDGE(dev))
2806                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2807                            FDI_FE_ERRC_ENABLE);
2808 }
2809
2810 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2811 {
2812         return crtc->base.enabled && crtc->active &&
2813                 crtc->config.has_pch_encoder;
2814 }
2815
2816 static void ivb_modeset_global_resources(struct drm_device *dev)
2817 {
2818         struct drm_i915_private *dev_priv = dev->dev_private;
2819         struct intel_crtc *pipe_B_crtc =
2820                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2821         struct intel_crtc *pipe_C_crtc =
2822                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2823         uint32_t temp;
2824
2825         /*
2826          * When everything is off, disable fdi C so that we can enable fdi B
2827          * with all lanes. Note that we don't care about enabled pipes without
2828          * an enabled pch encoder.
2829          */
2830         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2831             !pipe_has_enabled_pch(pipe_C_crtc)) {
2832                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2833                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2834
2835                 temp = I915_READ(SOUTH_CHICKEN1);
2836                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2837                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2838                 I915_WRITE(SOUTH_CHICKEN1, temp);
2839         }
2840 }
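
/*
 * A short illustrative note (derived from the checks here and in
 * ivybridge_update_fdi_bc_bifurcation() further below): FDI B and FDI C share
 * a pool of four lanes. With FDI_BC_BIFURCATION_SELECT cleared, FDI B may use
 * all four lanes and FDI C must stay disabled; with the bit set, the lanes are
 * split two/two between FDI B and FDI C.
 */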
2841
2842 /* The FDI link training functions for ILK/Ibexpeak. */
2843 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2844 {
2845         struct drm_device *dev = crtc->dev;
2846         struct drm_i915_private *dev_priv = dev->dev_private;
2847         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2848         int pipe = intel_crtc->pipe;
2849         u32 reg, temp, tries;
2850
2851         /* FDI needs bits from pipe first */
2852         assert_pipe_enabled(dev_priv, pipe);
2853
2854         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2855            for train result */
2856         reg = FDI_RX_IMR(pipe);
2857         temp = I915_READ(reg);
2858         temp &= ~FDI_RX_SYMBOL_LOCK;
2859         temp &= ~FDI_RX_BIT_LOCK;
2860         I915_WRITE(reg, temp);
2861         I915_READ(reg);
2862         udelay(150);
2863
2864         /* enable CPU FDI TX and PCH FDI RX */
2865         reg = FDI_TX_CTL(pipe);
2866         temp = I915_READ(reg);
2867         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2868         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2869         temp &= ~FDI_LINK_TRAIN_NONE;
2870         temp |= FDI_LINK_TRAIN_PATTERN_1;
2871         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2872
2873         reg = FDI_RX_CTL(pipe);
2874         temp = I915_READ(reg);
2875         temp &= ~FDI_LINK_TRAIN_NONE;
2876         temp |= FDI_LINK_TRAIN_PATTERN_1;
2877         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2878
2879         POSTING_READ(reg);
2880         udelay(150);
2881
2882         /* Ironlake workaround, enable clock pointer after FDI enable */
2883         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2884         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2885                    FDI_RX_PHASE_SYNC_POINTER_EN);
2886
2887         reg = FDI_RX_IIR(pipe);
2888         for (tries = 0; tries < 5; tries++) {
2889                 temp = I915_READ(reg);
2890                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2891
2892                 if (temp & FDI_RX_BIT_LOCK) {
2893                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2894                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2895                         break;
2896                 }
2897         }
2898         if (tries == 5)
2899                 DRM_ERROR("FDI train 1 fail!\n");
2900
2901         /* Train 2 */
2902         reg = FDI_TX_CTL(pipe);
2903         temp = I915_READ(reg);
2904         temp &= ~FDI_LINK_TRAIN_NONE;
2905         temp |= FDI_LINK_TRAIN_PATTERN_2;
2906         I915_WRITE(reg, temp);
2907
2908         reg = FDI_RX_CTL(pipe);
2909         temp = I915_READ(reg);
2910         temp &= ~FDI_LINK_TRAIN_NONE;
2911         temp |= FDI_LINK_TRAIN_PATTERN_2;
2912         I915_WRITE(reg, temp);
2913
2914         POSTING_READ(reg);
2915         udelay(150);
2916
2917         reg = FDI_RX_IIR(pipe);
2918         for (tries = 0; tries < 5; tries++) {
2919                 temp = I915_READ(reg);
2920                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2921
2922                 if (temp & FDI_RX_SYMBOL_LOCK) {
2923                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2924                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2925                         break;
2926                 }
2927         }
2928         if (tries == 5)
2929                 DRM_ERROR("FDI train 2 fail!\n");
2930
2931         DRM_DEBUG_KMS("FDI train done\n");
2932
2933 }
2934
2935 static const int snb_b_fdi_train_param[] = {
2936         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2937         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2938         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2939         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2940 };
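
/*
 * The four entries above are the voltage swing / pre-emphasis levels stepped
 * through by the training loops below: gen6_fdi_link_train() indexes them
 * with i, while ivb_manual_fdi_link_train() uses j / 2 (so each level is
 * tried twice before moving on).
 */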
2941
2942 /* The FDI link training functions for SNB/Cougarpoint. */
2943 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2944 {
2945         struct drm_device *dev = crtc->dev;
2946         struct drm_i915_private *dev_priv = dev->dev_private;
2947         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2948         int pipe = intel_crtc->pipe;
2949         u32 reg, temp, i, retry;
2950
2951         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2952            for train result */
2953         reg = FDI_RX_IMR(pipe);
2954         temp = I915_READ(reg);
2955         temp &= ~FDI_RX_SYMBOL_LOCK;
2956         temp &= ~FDI_RX_BIT_LOCK;
2957         I915_WRITE(reg, temp);
2958
2959         POSTING_READ(reg);
2960         udelay(150);
2961
2962         /* enable CPU FDI TX and PCH FDI RX */
2963         reg = FDI_TX_CTL(pipe);
2964         temp = I915_READ(reg);
2965         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2966         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2967         temp &= ~FDI_LINK_TRAIN_NONE;
2968         temp |= FDI_LINK_TRAIN_PATTERN_1;
2969         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2970         /* SNB-B */
2971         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2972         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2973
2974         I915_WRITE(FDI_RX_MISC(pipe),
2975                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2976
2977         reg = FDI_RX_CTL(pipe);
2978         temp = I915_READ(reg);
2979         if (HAS_PCH_CPT(dev)) {
2980                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2981                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2982         } else {
2983                 temp &= ~FDI_LINK_TRAIN_NONE;
2984                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2985         }
2986         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2987
2988         POSTING_READ(reg);
2989         udelay(150);
2990
2991         for (i = 0; i < 4; i++) {
2992                 reg = FDI_TX_CTL(pipe);
2993                 temp = I915_READ(reg);
2994                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2995                 temp |= snb_b_fdi_train_param[i];
2996                 I915_WRITE(reg, temp);
2997
2998                 POSTING_READ(reg);
2999                 udelay(500);
3000
3001                 for (retry = 0; retry < 5; retry++) {
3002                         reg = FDI_RX_IIR(pipe);
3003                         temp = I915_READ(reg);
3004                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3005                         if (temp & FDI_RX_BIT_LOCK) {
3006                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3007                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
3008                                 break;
3009                         }
3010                         udelay(50);
3011                 }
3012                 if (retry < 5)
3013                         break;
3014         }
3015         if (i == 4)
3016                 DRM_ERROR("FDI train 1 fail!\n");
3017
3018         /* Train 2 */
3019         reg = FDI_TX_CTL(pipe);
3020         temp = I915_READ(reg);
3021         temp &= ~FDI_LINK_TRAIN_NONE;
3022         temp |= FDI_LINK_TRAIN_PATTERN_2;
3023         if (IS_GEN6(dev)) {
3024                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3025                 /* SNB-B */
3026                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3027         }
3028         I915_WRITE(reg, temp);
3029
3030         reg = FDI_RX_CTL(pipe);
3031         temp = I915_READ(reg);
3032         if (HAS_PCH_CPT(dev)) {
3033                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3034                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3035         } else {
3036                 temp &= ~FDI_LINK_TRAIN_NONE;
3037                 temp |= FDI_LINK_TRAIN_PATTERN_2;
3038         }
3039         I915_WRITE(reg, temp);
3040
3041         POSTING_READ(reg);
3042         udelay(150);
3043
3044         for (i = 0; i < 4; i++) {
3045                 reg = FDI_TX_CTL(pipe);
3046                 temp = I915_READ(reg);
3047                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3048                 temp |= snb_b_fdi_train_param[i];
3049                 I915_WRITE(reg, temp);
3050
3051                 POSTING_READ(reg);
3052                 udelay(500);
3053
3054                 for (retry = 0; retry < 5; retry++) {
3055                         reg = FDI_RX_IIR(pipe);
3056                         temp = I915_READ(reg);
3057                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3058                         if (temp & FDI_RX_SYMBOL_LOCK) {
3059                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3060                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3061                                 break;
3062                         }
3063                         udelay(50);
3064                 }
3065                 if (retry < 5)
3066                         break;
3067         }
3068         if (i == 4)
3069                 DRM_ERROR("FDI train 2 fail!\n");
3070
3071         DRM_DEBUG_KMS("FDI train done.\n");
3072 }
3073
3074 /* Manual link training for Ivy Bridge A0 parts */
3075 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3076 {
3077         struct drm_device *dev = crtc->dev;
3078         struct drm_i915_private *dev_priv = dev->dev_private;
3079         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3080         int pipe = intel_crtc->pipe;
3081         u32 reg, temp, i, j;
3082
3083         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3084            for train result */
3085         reg = FDI_RX_IMR(pipe);
3086         temp = I915_READ(reg);
3087         temp &= ~FDI_RX_SYMBOL_LOCK;
3088         temp &= ~FDI_RX_BIT_LOCK;
3089         I915_WRITE(reg, temp);
3090
3091         POSTING_READ(reg);
3092         udelay(150);
3093
3094         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3095                       I915_READ(FDI_RX_IIR(pipe)));
3096
3097         /* Try each vswing and preemphasis setting twice before moving on */
3098         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3099                 /* disable first in case we need to retry */
3100                 reg = FDI_TX_CTL(pipe);
3101                 temp = I915_READ(reg);
3102                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3103                 temp &= ~FDI_TX_ENABLE;
3104                 I915_WRITE(reg, temp);
3105
3106                 reg = FDI_RX_CTL(pipe);
3107                 temp = I915_READ(reg);
3108                 temp &= ~FDI_LINK_TRAIN_AUTO;
3109                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3110                 temp &= ~FDI_RX_ENABLE;
3111                 I915_WRITE(reg, temp);
3112
3113                 /* enable CPU FDI TX and PCH FDI RX */
3114                 reg = FDI_TX_CTL(pipe);
3115                 temp = I915_READ(reg);
3116                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3117                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3118                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3119                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3120                 temp |= snb_b_fdi_train_param[j/2];
3121                 temp |= FDI_COMPOSITE_SYNC;
3122                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3123
3124                 I915_WRITE(FDI_RX_MISC(pipe),
3125                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3126
3127                 reg = FDI_RX_CTL(pipe);
3128                 temp = I915_READ(reg);
3129                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3130                 temp |= FDI_COMPOSITE_SYNC;
3131                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3132
3133                 POSTING_READ(reg);
3134                 udelay(1); /* should be 0.5us */
3135
3136                 for (i = 0; i < 4; i++) {
3137                         reg = FDI_RX_IIR(pipe);
3138                         temp = I915_READ(reg);
3139                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3140
3141                         if (temp & FDI_RX_BIT_LOCK ||
3142                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3143                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3144                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3145                                               i);
3146                                 break;
3147                         }
3148                         udelay(1); /* should be 0.5us */
3149                 }
3150                 if (i == 4) {
3151                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3152                         continue;
3153                 }
3154
3155                 /* Train 2 */
3156                 reg = FDI_TX_CTL(pipe);
3157                 temp = I915_READ(reg);
3158                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3159                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3160                 I915_WRITE(reg, temp);
3161
3162                 reg = FDI_RX_CTL(pipe);
3163                 temp = I915_READ(reg);
3164                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3165                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3166                 I915_WRITE(reg, temp);
3167
3168                 POSTING_READ(reg);
3169                 udelay(2); /* should be 1.5us */
3170
3171                 for (i = 0; i < 4; i++) {
3172                         reg = FDI_RX_IIR(pipe);
3173                         temp = I915_READ(reg);
3174                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3175
3176                         if (temp & FDI_RX_SYMBOL_LOCK ||
3177                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3178                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3179                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3180                                               i);
3181                                 goto train_done;
3182                         }
3183                         udelay(2); /* should be 1.5us */
3184                 }
3185                 if (i == 4)
3186                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3187         }
3188
3189 train_done:
3190         DRM_DEBUG_KMS("FDI train done.\n");
3191 }
3192
3193 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3194 {
3195         struct drm_device *dev = intel_crtc->base.dev;
3196         struct drm_i915_private *dev_priv = dev->dev_private;
3197         int pipe = intel_crtc->pipe;
3198         u32 reg, temp;
3199
3200
3201         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3202         reg = FDI_RX_CTL(pipe);
3203         temp = I915_READ(reg);
3204         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3205         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3206         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3207         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3208
3209         POSTING_READ(reg);
3210         udelay(200);
3211
3212         /* Switch from Rawclk to PCDclk */
3213         temp = I915_READ(reg);
3214         I915_WRITE(reg, temp | FDI_PCDCLK);
3215
3216         POSTING_READ(reg);
3217         udelay(200);
3218
3219         /* Enable CPU FDI TX PLL, always on for Ironlake */
3220         reg = FDI_TX_CTL(pipe);
3221         temp = I915_READ(reg);
3222         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3223                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3224
3225                 POSTING_READ(reg);
3226                 udelay(100);
3227         }
3228 }
3229
3230 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3231 {
3232         struct drm_device *dev = intel_crtc->base.dev;
3233         struct drm_i915_private *dev_priv = dev->dev_private;
3234         int pipe = intel_crtc->pipe;
3235         u32 reg, temp;
3236
3237         /* Switch from PCDclk to Rawclk */
3238         reg = FDI_RX_CTL(pipe);
3239         temp = I915_READ(reg);
3240         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3241
3242         /* Disable CPU FDI TX PLL */
3243         reg = FDI_TX_CTL(pipe);
3244         temp = I915_READ(reg);
3245         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3246
3247         POSTING_READ(reg);
3248         udelay(100);
3249
3250         reg = FDI_RX_CTL(pipe);
3251         temp = I915_READ(reg);
3252         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3253
3254         /* Wait for the clocks to turn off. */
3255         POSTING_READ(reg);
3256         udelay(100);
3257 }
3258
3259 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3260 {
3261         struct drm_device *dev = crtc->dev;
3262         struct drm_i915_private *dev_priv = dev->dev_private;
3263         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3264         int pipe = intel_crtc->pipe;
3265         u32 reg, temp;
3266
3267         /* disable CPU FDI tx and PCH FDI rx */
3268         reg = FDI_TX_CTL(pipe);
3269         temp = I915_READ(reg);
3270         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3271         POSTING_READ(reg);
3272
3273         reg = FDI_RX_CTL(pipe);
3274         temp = I915_READ(reg);
3275         temp &= ~(0x7 << 16);
3276         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3277         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3278
3279         POSTING_READ(reg);
3280         udelay(100);
3281
3282         /* Ironlake workaround, disable clock pointer after downing FDI */
3283         if (HAS_PCH_IBX(dev))
3284                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3285
3286         /* still set train pattern 1 */
3287         reg = FDI_TX_CTL(pipe);
3288         temp = I915_READ(reg);
3289         temp &= ~FDI_LINK_TRAIN_NONE;
3290         temp |= FDI_LINK_TRAIN_PATTERN_1;
3291         I915_WRITE(reg, temp);
3292
3293         reg = FDI_RX_CTL(pipe);
3294         temp = I915_READ(reg);
3295         if (HAS_PCH_CPT(dev)) {
3296                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3297                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3298         } else {
3299                 temp &= ~FDI_LINK_TRAIN_NONE;
3300                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3301         }
3302         /* BPC in FDI rx is consistent with that in PIPECONF */
3303         temp &= ~(0x07 << 16);
3304         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3305         I915_WRITE(reg, temp);
3306
3307         POSTING_READ(reg);
3308         udelay(100);
3309 }
3310
3311 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3312 {
3313         struct intel_crtc *crtc;
3314
3315         /* Note that we don't need to be called with mode_config.lock here
3316          * as our list of CRTC objects is static for the lifetime of the
3317          * device and so cannot disappear as we iterate. Similarly, we can
3318          * happily treat the predicates as racy, atomic checks as userspace
3319          * cannot claim and pin a new fb without at least acquiring the
3320          * struct_mutex and so serialising with us.
3321          */
3322         for_each_intel_crtc(dev, crtc) {
3323                 if (atomic_read(&crtc->unpin_work_count) == 0)
3324                         continue;
3325
3326                 if (crtc->unpin_work)
3327                         intel_wait_for_vblank(dev, crtc->pipe);
3328
3329                 return true;
3330         }
3331
3332         return false;
3333 }
3334
3335 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3336 {
3337         struct drm_device *dev = crtc->dev;
3338         struct drm_i915_private *dev_priv = dev->dev_private;
3339
3340         if (crtc->primary->fb == NULL)
3341                 return;
3342
3343         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3344
3345         WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3346                                    !intel_crtc_has_pending_flip(crtc),
3347                                    60*HZ) == 0);
3348
3349         mutex_lock(&dev->struct_mutex);
3350         intel_finish_fb(crtc->primary->fb);
3351         mutex_unlock(&dev->struct_mutex);
3352 }
3353
3354 /* Program iCLKIP clock to the desired frequency */
3355 static void lpt_program_iclkip(struct drm_crtc *crtc)
3356 {
3357         struct drm_device *dev = crtc->dev;
3358         struct drm_i915_private *dev_priv = dev->dev_private;
3359         int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3360         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3361         u32 temp;
3362
3363         mutex_lock(&dev_priv->dpio_lock);
3364
3365         /* It is necessary to ungate the pixclk gate prior to programming
3366          * the divisors, and gate it back when it is done.
3367          */
3368         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3369
3370         /* Disable SSCCTL */
3371         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3372                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3373                                 SBI_SSCCTL_DISABLE,
3374                         SBI_ICLK);
3375
3376         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3377         if (clock == 20000) {
3378                 auxdiv = 1;
3379                 divsel = 0x41;
3380                 phaseinc = 0x20;
3381         } else {
3382                 /* The iCLK virtual clock root frequency is in MHz,
3383                  * but the adjusted_mode->crtc_clock is in KHz. To get the
3384                  * divisors, it is necessary to divide one by another, so we
3385                  * convert the virtual clock precision to KHz here for higher
3386                  * precision.
3387                  */
3388                 u32 iclk_virtual_root_freq = 172800 * 1000;
3389                 u32 iclk_pi_range = 64;
3390                 u32 desired_divisor, msb_divisor_value, pi_value;
3391
3392                 desired_divisor = (iclk_virtual_root_freq / clock);
3393                 msb_divisor_value = desired_divisor / iclk_pi_range;
3394                 pi_value = desired_divisor % iclk_pi_range;
3395
3396                 auxdiv = 0;
3397                 divsel = msb_divisor_value - 2;
3398                 phaseinc = pi_value;
3399         }
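        /*
         * Worked example (illustrative only, assuming a 108 MHz pixel clock,
         * i.e. clock == 108000 kHz):
         *   desired_divisor   = 172800000 / 108000 = 1600
         *   msb_divisor_value = 1600 / 64 = 25
         *   pi_value          = 1600 % 64 = 0
         * which yields auxdiv = 0, divsel = 25 - 2 = 23, phaseinc = 0.
         */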
3400
3401         /* This should not happen with any sane values */
3402         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3403                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3404         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3405                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3406
3407         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3408                         clock,
3409                         auxdiv,
3410                         divsel,
3411                         phasedir,
3412                         phaseinc);
3413
3414         /* Program SSCDIVINTPHASE6 */
3415         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3416         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3417         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3418         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3419         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3420         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3421         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3422         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3423
3424         /* Program SSCAUXDIV */
3425         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3426         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3427         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3428         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3429
3430         /* Enable modulator and associated divider */
3431         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3432         temp &= ~SBI_SSCCTL_DISABLE;
3433         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3434
3435         /* Wait for initialization time */
3436         udelay(24);
3437
3438         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3439
3440         mutex_unlock(&dev_priv->dpio_lock);
3441 }
3442
3443 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3444                                                 enum pipe pch_transcoder)
3445 {
3446         struct drm_device *dev = crtc->base.dev;
3447         struct drm_i915_private *dev_priv = dev->dev_private;
3448         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3449
3450         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3451                    I915_READ(HTOTAL(cpu_transcoder)));
3452         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3453                    I915_READ(HBLANK(cpu_transcoder)));
3454         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3455                    I915_READ(HSYNC(cpu_transcoder)));
3456
3457         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3458                    I915_READ(VTOTAL(cpu_transcoder)));
3459         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3460                    I915_READ(VBLANK(cpu_transcoder)));
3461         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3462                    I915_READ(VSYNC(cpu_transcoder)));
3463         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3464                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3465 }
3466
3467 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3468 {
3469         struct drm_i915_private *dev_priv = dev->dev_private;
3470         uint32_t temp;
3471
3472         temp = I915_READ(SOUTH_CHICKEN1);
3473         if (temp & FDI_BC_BIFURCATION_SELECT)
3474                 return;
3475
3476         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3477         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3478
3479         temp |= FDI_BC_BIFURCATION_SELECT;
3480         DRM_DEBUG_KMS("enabling fdi C rx\n");
3481         I915_WRITE(SOUTH_CHICKEN1, temp);
3482         POSTING_READ(SOUTH_CHICKEN1);
3483 }
3484
3485 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3486 {
3487         struct drm_device *dev = intel_crtc->base.dev;
3488         struct drm_i915_private *dev_priv = dev->dev_private;
3489
3490         switch (intel_crtc->pipe) {
3491         case PIPE_A:
3492                 break;
3493         case PIPE_B:
3494                 if (intel_crtc->config.fdi_lanes > 2)
3495                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3496                 else
3497                         cpt_enable_fdi_bc_bifurcation(dev);
3498
3499                 break;
3500         case PIPE_C:
3501                 cpt_enable_fdi_bc_bifurcation(dev);
3502
3503                 break;
3504         default:
3505                 BUG();
3506         }
3507 }
3508
3509 /*
3510  * Enable PCH resources required for PCH ports:
3511  *   - PCH PLLs
3512  *   - FDI training & RX/TX
3513  *   - update transcoder timings
3514  *   - DP transcoding bits
3515  *   - transcoder
3516  */
3517 static void ironlake_pch_enable(struct drm_crtc *crtc)
3518 {
3519         struct drm_device *dev = crtc->dev;
3520         struct drm_i915_private *dev_priv = dev->dev_private;
3521         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3522         int pipe = intel_crtc->pipe;
3523         u32 reg, temp;
3524
3525         assert_pch_transcoder_disabled(dev_priv, pipe);
3526
3527         if (IS_IVYBRIDGE(dev))
3528                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3529
3530         /* Write the TU size bits before fdi link training, so that error
3531          * detection works. */
3532         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3533                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3534
3535         /* For PCH output, training FDI link */
3536         dev_priv->display.fdi_link_train(crtc);
3537
3538         /* We need to program the right clock selection before writing the pixel
3539          * multiplier into the DPLL. */
3540         if (HAS_PCH_CPT(dev)) {
3541                 u32 sel;
3542
3543                 temp = I915_READ(PCH_DPLL_SEL);
3544                 temp |= TRANS_DPLL_ENABLE(pipe);
3545                 sel = TRANS_DPLLB_SEL(pipe);
3546                 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3547                         temp |= sel;
3548                 else
3549                         temp &= ~sel;
3550                 I915_WRITE(PCH_DPLL_SEL, temp);
3551         }
3552
3553         /* XXX: PCH PLLs can be enabled any time before we enable the PCH
3554          * transcoder, and we actually should do this to not upset any PCH
3555          * transcoder that already uses the clock when we share it.
3556          *
3557          * Note that enable_shared_dpll tries to do the right thing, but
3558          * get_shared_dpll unconditionally resets the pll - we need that to have
3559          * the right LVDS enable sequence. */
3560         intel_enable_shared_dpll(intel_crtc);
3561
3562         /* set transcoder timing, panel must allow it */
3563         assert_panel_unlocked(dev_priv, pipe);
3564         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3565
3566         intel_fdi_normal_train(crtc);
3567
3568         /* For PCH DP, enable TRANS_DP_CTL */
3569         if (HAS_PCH_CPT(dev) &&
3570             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3571              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3572                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3573                 reg = TRANS_DP_CTL(pipe);
3574                 temp = I915_READ(reg);
3575                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3576                           TRANS_DP_SYNC_MASK |
3577                           TRANS_DP_BPC_MASK);
3578                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3579                          TRANS_DP_ENH_FRAMING);
3580                 temp |= bpc << 9; /* same format but at 11:9 */
3581
3582                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3583                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3584                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3585                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3586
3587                 switch (intel_trans_dp_port_sel(crtc)) {
3588                 case PCH_DP_B:
3589                         temp |= TRANS_DP_PORT_SEL_B;
3590                         break;
3591                 case PCH_DP_C:
3592                         temp |= TRANS_DP_PORT_SEL_C;
3593                         break;
3594                 case PCH_DP_D:
3595                         temp |= TRANS_DP_PORT_SEL_D;
3596                         break;
3597                 default:
3598                         BUG();
3599                 }
3600
3601                 I915_WRITE(reg, temp);
3602         }
3603
3604         ironlake_enable_pch_transcoder(dev_priv, pipe);
3605 }
3606
3607 static void lpt_pch_enable(struct drm_crtc *crtc)
3608 {
3609         struct drm_device *dev = crtc->dev;
3610         struct drm_i915_private *dev_priv = dev->dev_private;
3611         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3612         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3613
3614         assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3615
3616         lpt_program_iclkip(crtc);
3617
3618         /* Set transcoder timing. */
3619         ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3620
3621         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3622 }
3623
3624 static void intel_put_shared_dpll(struct intel_crtc *crtc)
3625 {
3626         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3627
3628         if (pll == NULL)
3629                 return;
3630
3631         if (pll->refcount == 0) {
3632                 WARN(1, "bad %s refcount\n", pll->name);
3633                 return;
3634         }
3635
3636         if (--pll->refcount == 0) {
3637                 WARN_ON(pll->on);
3638                 WARN_ON(pll->active);
3639         }
3640
3641         crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3642 }
3643
3644 static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3645 {
3646         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3647         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3648         enum intel_dpll_id i;
3649
3650         if (pll) {
3651                 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3652                               crtc->base.base.id, pll->name);
3653                 intel_put_shared_dpll(crtc);
3654         }
3655
3656         if (HAS_PCH_IBX(dev_priv->dev)) {
3657                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3658                 i = (enum intel_dpll_id) crtc->pipe;
3659                 pll = &dev_priv->shared_dplls[i];
3660
3661                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3662                               crtc->base.base.id, pll->name);
3663
3664                 WARN_ON(pll->refcount);
3665
3666                 goto found;
3667         }
3668
3669         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3670                 pll = &dev_priv->shared_dplls[i];
3671
3672                 /* Only want to check enabled timings first */
3673                 if (pll->refcount == 0)
3674                         continue;
3675
3676                 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3677                            sizeof(pll->hw_state)) == 0) {
3678                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3679                                       crtc->base.base.id,
3680                                       pll->name, pll->refcount, pll->active);
3681
3682                         goto found;
3683                 }
3684         }
3685
3686         /* Ok no matching timings, maybe there's a free one? */
3687         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3688                 pll = &dev_priv->shared_dplls[i];
3689                 if (pll->refcount == 0) {
3690                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3691                                       crtc->base.base.id, pll->name);
3692                         goto found;
3693                 }
3694         }
3695
3696         return NULL;
3697
3698 found:
3699         if (pll->refcount == 0)
3700                 pll->hw_state = crtc->config.dpll_hw_state;
3701
3702         crtc->config.shared_dpll = i;
3703         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3704                          pipe_name(crtc->pipe));
3705
3706         pll->refcount++;
3707
3708         return pll;
3709 }
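
/*
 * Rough usage sketch (illustrative only): a mode-set path that has filled in
 * crtc->config.dpll_hw_state would typically do
 *
 *	pll = intel_get_shared_dpll(intel_crtc);
 *	if (pll == NULL)
 *		return -EINVAL;		// no matching or free PLL
 *
 * and later drop the reference again via intel_put_shared_dpll(). Here
 * pll->refcount counts the CRTCs referencing the PLL, while pll->active
 * tracks how many of them currently have it enabled.
 */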
3710
3711 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3712 {
3713         struct drm_i915_private *dev_priv = dev->dev_private;
3714         int dslreg = PIPEDSL(pipe);
3715         u32 temp;
3716
3717         temp = I915_READ(dslreg);
3718         udelay(500);
3719         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3720                 if (wait_for(I915_READ(dslreg) != temp, 5))
3721                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3722         }
3723 }
3724
3725 static void ironlake_pfit_enable(struct intel_crtc *crtc)
3726 {
3727         struct drm_device *dev = crtc->base.dev;
3728         struct drm_i915_private *dev_priv = dev->dev_private;
3729         int pipe = crtc->pipe;
3730
3731         if (crtc->config.pch_pfit.enabled) {
3732                 /* Force use of hard-coded filter coefficients
3733                  * as some pre-programmed values are broken,
3734                  * e.g. x201.
3735                  */
3736                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3737                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3738                                                  PF_PIPE_SEL_IVB(pipe));
3739                 else
3740                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3741                 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3742                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3743         }
3744 }
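
/*
 * Note on the window programming above: pch_pfit.pos and pch_pfit.size are
 * expected to arrive pre-packed as (x << 16) | y and (width << 16) | height
 * respectively, matching the PF_WIN_POS/PF_WIN_SZ register layout, so they
 * can be written out unmodified.
 */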
3745
3746 static void intel_enable_planes(struct drm_crtc *crtc)
3747 {
3748         struct drm_device *dev = crtc->dev;
3749         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3750         struct drm_plane *plane;
3751         struct intel_plane *intel_plane;
3752
3753         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3754                 intel_plane = to_intel_plane(plane);
3755                 if (intel_plane->pipe == pipe)
3756                         intel_plane_restore(&intel_plane->base);
3757         }
3758 }
3759
3760 static void intel_disable_planes(struct drm_crtc *crtc)
3761 {
3762         struct drm_device *dev = crtc->dev;
3763         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3764         struct drm_plane *plane;
3765         struct intel_plane *intel_plane;
3766
3767         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3768                 intel_plane = to_intel_plane(plane);
3769                 if (intel_plane->pipe == pipe)
3770                         intel_plane_disable(&intel_plane->base);
3771         }
3772 }
3773
3774 void hsw_enable_ips(struct intel_crtc *crtc)
3775 {
3776         struct drm_device *dev = crtc->base.dev;
3777         struct drm_i915_private *dev_priv = dev->dev_private;
3778
3779         if (!crtc->config.ips_enabled)
3780                 return;
3781
3782         /* We can only enable IPS after we enable a plane and wait for a vblank */
3783         intel_wait_for_vblank(dev, crtc->pipe);
3784
3785         assert_plane_enabled(dev_priv, crtc->plane);
3786         if (IS_BROADWELL(dev)) {
3787                 mutex_lock(&dev_priv->rps.hw_lock);
3788                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3789                 mutex_unlock(&dev_priv->rps.hw_lock);
3790                 /* Quoting Art Runyan: "its not safe to expect any particular
3791                  * value in IPS_CTL bit 31 after enabling IPS through the
3792                  * mailbox." Moreover, the mailbox may return a bogus state,
3793                  * so we need to just enable it and continue on.
3794                  */
3795         } else {
3796                 I915_WRITE(IPS_CTL, IPS_ENABLE);
3797                 /* The bit only becomes 1 in the next vblank, so this wait here
3798                  * is essentially intel_wait_for_vblank. If we don't have this
3799                  * and don't wait for vblanks until the end of crtc_enable, then
3800                  * the HW state readout code will complain that the expected
3801                  * IPS_CTL value is not the one we read. */
3802                 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3803                         DRM_ERROR("Timed out waiting for IPS enable\n");
3804         }
3805 }
3806
3807 void hsw_disable_ips(struct intel_crtc *crtc)
3808 {
3809         struct drm_device *dev = crtc->base.dev;
3810         struct drm_i915_private *dev_priv = dev->dev_private;
3811
3812         if (!crtc->config.ips_enabled)
3813                 return;
3814
3815         assert_plane_enabled(dev_priv, crtc->plane);
3816         if (IS_BROADWELL(dev)) {
3817                 mutex_lock(&dev_priv->rps.hw_lock);
3818                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3819                 mutex_unlock(&dev_priv->rps.hw_lock);
3820                 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
3821                 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3822                         DRM_ERROR("Timed out waiting for IPS disable\n");
3823         } else {
3824                 I915_WRITE(IPS_CTL, 0);
3825                 POSTING_READ(IPS_CTL);
3826         }
3827
3828         /* We need to wait for a vblank before we can disable the plane. */
3829         intel_wait_for_vblank(dev, crtc->pipe);
3830 }
3831
3832 /** Loads the palette/gamma unit for the CRTC with the prepared values */
3833 static void intel_crtc_load_lut(struct drm_crtc *crtc)
3834 {
3835         struct drm_device *dev = crtc->dev;
3836         struct drm_i915_private *dev_priv = dev->dev_private;
3837         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3838         enum pipe pipe = intel_crtc->pipe;
3839         int palreg = PALETTE(pipe);
3840         int i;
3841         bool reenable_ips = false;
3842
3843         /* The clocks have to be on to load the palette. */
3844         if (!crtc->enabled || !intel_crtc->active)
3845                 return;
3846
3847         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3848                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3849                         assert_dsi_pll_enabled(dev_priv);
3850                 else
3851                         assert_pll_enabled(dev_priv, pipe);
3852         }
3853
3854         /* use legacy palette for Ironlake */
3855         if (HAS_PCH_SPLIT(dev))
3856                 palreg = LGC_PALETTE(pipe);
3857
3858         /* Workaround: Do not read or write the pipe palette/gamma data while
3859          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3860          */
3861         if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3862             ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3863              GAMMA_MODE_MODE_SPLIT)) {
3864                 hsw_disable_ips(intel_crtc);
3865                 reenable_ips = true;
3866         }
3867
3868         for (i = 0; i < 256; i++) {
3869                 I915_WRITE(palreg + 4 * i,
3870                            (intel_crtc->lut_r[i] << 16) |
3871                            (intel_crtc->lut_g[i] << 8) |
3872                            intel_crtc->lut_b[i]);
3873         }
3874
3875         if (reenable_ips)
3876                 hsw_enable_ips(intel_crtc);
3877 }
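
/*
 * For reference, each of the 256 legacy palette entries written above packs
 * the 8-bit gamma values as 0x00RRGGBB; e.g. lut_r = 0x80, lut_g = 0x40,
 * lut_b = 0x20 is written as 0x00804020.
 */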
3878
3879 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3880 {
3881         if (!enable && intel_crtc->overlay) {
3882                 struct drm_device *dev = intel_crtc->base.dev;
3883                 struct drm_i915_private *dev_priv = dev->dev_private;
3884
3885                 mutex_lock(&dev->struct_mutex);
3886                 dev_priv->mm.interruptible = false;
3887                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3888                 dev_priv->mm.interruptible = true;
3889                 mutex_unlock(&dev->struct_mutex);
3890         }
3891
3892         /* Let userspace switch the overlay on again. In most cases userspace
3893          * has to recompute where to put it anyway.
3894          */
3895 }
3896
3897 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3898 {
3899         struct drm_device *dev = crtc->dev;
3900         struct drm_i915_private *dev_priv = dev->dev_private;
3901         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3902         int pipe = intel_crtc->pipe;
3903         int plane = intel_crtc->plane;
3904
3905         drm_vblank_on(dev, pipe);
3906
3907         intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3908         intel_enable_planes(crtc);
3909         intel_crtc_update_cursor(crtc, true);
3910         intel_crtc_dpms_overlay(intel_crtc, true);
3911
3912         hsw_enable_ips(intel_crtc);
3913
3914         mutex_lock(&dev->struct_mutex);
3915         intel_update_fbc(dev);
3916         mutex_unlock(&dev->struct_mutex);
3917
3918         /*
3919          * FIXME: Once we grow proper nuclear flip support out of this we need
3920          * to compute the mask of flip planes precisely. For the time being
3921          * consider this a flip from a NULL plane.
3922          */
3923         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
3924 }
3925
3926 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3927 {
3928         struct drm_device *dev = crtc->dev;
3929         struct drm_i915_private *dev_priv = dev->dev_private;
3930         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3931         int pipe = intel_crtc->pipe;
3932         int plane = intel_crtc->plane;
3933
3934         intel_crtc_wait_for_pending_flips(crtc);
3935
3936         if (dev_priv->fbc.plane == plane)
3937                 intel_disable_fbc(dev);
3938
3939         hsw_disable_ips(intel_crtc);
3940
3941         intel_crtc_dpms_overlay(intel_crtc, false);
3942         intel_crtc_update_cursor(crtc, false);
3943         intel_disable_planes(crtc);
3944         intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3945
3946         /*
3947          * FIXME: Once we grow proper nuclear flip support out of this we need
3948          * to compute the mask of flip planes precisely. For the time being
3949          * consider this a flip to a NULL plane.
3950          */
3951         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
3952
3953         drm_vblank_off(dev, pipe);
3954 }
3955
3956 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3957 {
3958         struct drm_device *dev = crtc->dev;
3959         struct drm_i915_private *dev_priv = dev->dev_private;
3960         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3961         struct intel_encoder *encoder;
3962         int pipe = intel_crtc->pipe;
3963         enum plane plane = intel_crtc->plane;
3964
3965         WARN_ON(!crtc->enabled);
3966
3967         if (intel_crtc->active)
3968                 return;
3969
3970         if (intel_crtc->config.has_pch_encoder)
3971                 intel_prepare_shared_dpll(intel_crtc);
3972
3973         if (intel_crtc->config.has_dp_encoder)
3974                 intel_dp_set_m_n(intel_crtc);
3975
3976         intel_set_pipe_timings(intel_crtc);
3977
3978         if (intel_crtc->config.has_pch_encoder) {
3979                 intel_cpu_transcoder_set_m_n(intel_crtc,
3980                                              &intel_crtc->config.fdi_m_n);
3981         }
3982
3983         ironlake_set_pipeconf(crtc);
3984
3985         /* Set up the display plane register */
3986         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
3987         POSTING_READ(DSPCNTR(plane));
3988
3989         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
3990                                                crtc->x, crtc->y);
3991
3992         intel_crtc->active = true;
3993
3994         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3995         intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3996
3997         for_each_encoder_on_crtc(dev, crtc, encoder)
3998                 if (encoder->pre_enable)
3999                         encoder->pre_enable(encoder);
4000
4001         if (intel_crtc->config.has_pch_encoder) {
4002                 /* Note: FDI PLL enabling _must_ be done before we enable the
4003                  * cpu pipes, hence this is separate from all the other fdi/pch
4004                  * enabling. */
4005                 ironlake_fdi_pll_enable(intel_crtc);
4006         } else {
4007                 assert_fdi_tx_disabled(dev_priv, pipe);
4008                 assert_fdi_rx_disabled(dev_priv, pipe);
4009         }
4010
4011         ironlake_pfit_enable(intel_crtc);
4012
4013         /*
4014          * On ILK+ LUT must be loaded before the pipe is running but with
4015          * clocks enabled
4016          */
4017         intel_crtc_load_lut(crtc);
4018
4019         intel_update_watermarks(crtc);
4020         intel_enable_pipe(intel_crtc);
4021
4022         if (intel_crtc->config.has_pch_encoder)
4023                 ironlake_pch_enable(crtc);
4024
4025         for_each_encoder_on_crtc(dev, crtc, encoder)
4026                 encoder->enable(encoder);
4027
4028         if (HAS_PCH_CPT(dev))
4029                 cpt_verify_modeset(dev, intel_crtc->pipe);
4030
4031         intel_crtc_enable_planes(crtc);
4032 }
4033
4034 /* IPS only exists on ULT machines and is tied to pipe A. */
4035 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4036 {
4037         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4038 }
4039
4040 /*
4041  * This implements the workaround described in the "notes" section of the mode
4042  * set sequence documentation. When going from no pipes or single pipe to
4043  * multiple pipes, and planes are enabled after the pipe, we need to wait at
4044  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4045  */
4046 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4047 {
4048         struct drm_device *dev = crtc->base.dev;
4049         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4050
4051         /* We want to get the other_active_crtc only if there's only 1 other
4052          * active crtc. */
4053         for_each_intel_crtc(dev, crtc_it) {
4054                 if (!crtc_it->active || crtc_it == crtc)
4055                         continue;
4056
4057                 if (other_active_crtc)
4058                         return;
4059
4060                 other_active_crtc = crtc_it;
4061         }
4062         if (!other_active_crtc)
4063                 return;
4064
4065         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4066         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4067 }
4068
4069 static void haswell_crtc_enable(struct drm_crtc *crtc)
4070 {
4071         struct drm_device *dev = crtc->dev;
4072         struct drm_i915_private *dev_priv = dev->dev_private;
4073         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4074         struct intel_encoder *encoder;
4075         int pipe = intel_crtc->pipe;
4076         enum plane plane = intel_crtc->plane;
4077
4078         WARN_ON(!crtc->enabled);
4079
4080         if (intel_crtc->active)
4081                 return;
4082
4083         if (intel_crtc->config.has_dp_encoder)
4084                 intel_dp_set_m_n(intel_crtc);
4085
4086         intel_set_pipe_timings(intel_crtc);
4087
4088         if (intel_crtc->config.has_pch_encoder) {
4089                 intel_cpu_transcoder_set_m_n(intel_crtc,
4090                                              &intel_crtc->config.fdi_m_n);
4091         }
4092
4093         haswell_set_pipeconf(crtc);
4094
4095         intel_set_pipe_csc(crtc);
4096
4097         /* Set up the display plane register */
4098         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4099         POSTING_READ(DSPCNTR(plane));
4100
4101         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4102                                                crtc->x, crtc->y);
4103
4104         intel_crtc->active = true;
4105
4106         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4107         if (intel_crtc->config.has_pch_encoder)
4108                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4109
4110         if (intel_crtc->config.has_pch_encoder)
4111                 dev_priv->display.fdi_link_train(crtc);
4112
4113         for_each_encoder_on_crtc(dev, crtc, encoder)
4114                 if (encoder->pre_enable)
4115                         encoder->pre_enable(encoder);
4116
4117         intel_ddi_enable_pipe_clock(intel_crtc);
4118
4119         ironlake_pfit_enable(intel_crtc);
4120
4121         /*
4122          * On ILK+ LUT must be loaded before the pipe is running but with
4123          * clocks enabled
4124          */
4125         intel_crtc_load_lut(crtc);
4126
4127         intel_ddi_set_pipe_settings(crtc);
4128         intel_ddi_enable_transcoder_func(crtc);
4129
4130         intel_update_watermarks(crtc);
4131         intel_enable_pipe(intel_crtc);
4132
4133         if (intel_crtc->config.has_pch_encoder)
4134                 lpt_pch_enable(crtc);
4135
4136         for_each_encoder_on_crtc(dev, crtc, encoder) {
4137                 encoder->enable(encoder);
4138                 intel_opregion_notify_encoder(encoder, true);
4139         }
4140
4141         /* If we change the relative order between pipe/planes enabling, we need
4142          * to change the workaround. */
4143         haswell_mode_set_planes_workaround(intel_crtc);
4144         intel_crtc_enable_planes(crtc);
4145 }
4146
4147 static void ironlake_pfit_disable(struct intel_crtc *crtc)
4148 {
4149         struct drm_device *dev = crtc->base.dev;
4150         struct drm_i915_private *dev_priv = dev->dev_private;
4151         int pipe = crtc->pipe;
4152
4153         /* To avoid upsetting the power well on haswell, only disable the pfit if
4154          * it's in use. The hw state code will make sure we get this right. */
4155         if (crtc->config.pch_pfit.enabled) {
4156                 I915_WRITE(PF_CTL(pipe), 0);
4157                 I915_WRITE(PF_WIN_POS(pipe), 0);
4158                 I915_WRITE(PF_WIN_SZ(pipe), 0);
4159         }
4160 }
4161
4162 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4163 {
4164         struct drm_device *dev = crtc->dev;
4165         struct drm_i915_private *dev_priv = dev->dev_private;
4166         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4167         struct intel_encoder *encoder;
4168         int pipe = intel_crtc->pipe;
4169         u32 reg, temp;
4170
4171         if (!intel_crtc->active)
4172                 return;
4173
4174         intel_crtc_disable_planes(crtc);
4175
4176         for_each_encoder_on_crtc(dev, crtc, encoder)
4177                 encoder->disable(encoder);
4178
4179         if (intel_crtc->config.has_pch_encoder)
4180                 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
4181
4182         intel_disable_pipe(dev_priv, pipe);
4183
4184         ironlake_pfit_disable(intel_crtc);
4185
4186         for_each_encoder_on_crtc(dev, crtc, encoder)
4187                 if (encoder->post_disable)
4188                         encoder->post_disable(encoder);
4189
4190         if (intel_crtc->config.has_pch_encoder) {
4191                 ironlake_fdi_disable(crtc);
4192
4193                 ironlake_disable_pch_transcoder(dev_priv, pipe);
4194                 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4195
4196                 if (HAS_PCH_CPT(dev)) {
4197                         /* disable TRANS_DP_CTL */
4198                         reg = TRANS_DP_CTL(pipe);
4199                         temp = I915_READ(reg);
4200                         temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4201                                   TRANS_DP_PORT_SEL_MASK);
4202                         temp |= TRANS_DP_PORT_SEL_NONE;
4203                         I915_WRITE(reg, temp);
4204
4205                         /* disable DPLL_SEL */
4206                         temp = I915_READ(PCH_DPLL_SEL);
4207                         temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4208                         I915_WRITE(PCH_DPLL_SEL, temp);
4209                 }
4210
4211                 /* disable PCH DPLL */
4212                 intel_disable_shared_dpll(intel_crtc);
4213
4214                 ironlake_fdi_pll_disable(intel_crtc);
4215         }
4216
4217         intel_crtc->active = false;
4218         intel_update_watermarks(crtc);
4219
4220         mutex_lock(&dev->struct_mutex);
4221         intel_update_fbc(dev);
4222         mutex_unlock(&dev->struct_mutex);
4223 }
4224
4225 static void haswell_crtc_disable(struct drm_crtc *crtc)
4226 {
4227         struct drm_device *dev = crtc->dev;
4228         struct drm_i915_private *dev_priv = dev->dev_private;
4229         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4230         struct intel_encoder *encoder;
4231         int pipe = intel_crtc->pipe;
4232         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4233
4234         if (!intel_crtc->active)
4235                 return;
4236
4237         intel_crtc_disable_planes(crtc);
4238
4239         for_each_encoder_on_crtc(dev, crtc, encoder) {
4240                 intel_opregion_notify_encoder(encoder, false);
4241                 encoder->disable(encoder);
4242         }
4243
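        /*
         * LPT has a single PCH transcoder, so PCH fifo underrun reporting
         * is always tracked against transcoder A here.
         */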
4244         if (intel_crtc->config.has_pch_encoder)
4245                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4246         intel_disable_pipe(dev_priv, pipe);
4247
4248         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4249
4250         ironlake_pfit_disable(intel_crtc);
4251
4252         intel_ddi_disable_pipe_clock(intel_crtc);
4253
4254         for_each_encoder_on_crtc(dev, crtc, encoder)
4255                 if (encoder->post_disable)
4256                         encoder->post_disable(encoder);
4257
4258         if (intel_crtc->config.has_pch_encoder) {
4259                 lpt_disable_pch_transcoder(dev_priv);
4260                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4261                 intel_ddi_fdi_disable(crtc);
4262         }
4263
4264         intel_crtc->active = false;
4265         intel_update_watermarks(crtc);
4266
4267         mutex_lock(&dev->struct_mutex);
4268         intel_update_fbc(dev);
4269         mutex_unlock(&dev->struct_mutex);
4270 }
4271
4272 static void ironlake_crtc_off(struct drm_crtc *crtc)
4273 {
4274         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4275         intel_put_shared_dpll(intel_crtc);
4276 }
4277
4278 static void haswell_crtc_off(struct drm_crtc *crtc)
4279 {
4280         intel_ddi_put_crtc_pll(crtc);
4281 }
4282
4283 static void i9xx_pfit_enable(struct intel_crtc *crtc)
4284 {
4285         struct drm_device *dev = crtc->base.dev;
4286         struct drm_i915_private *dev_priv = dev->dev_private;
4287         struct intel_crtc_config *pipe_config = &crtc->config;
4288
4289         if (!crtc->config.gmch_pfit.control)
4290                 return;
4291
4292         /*
4293          * The panel fitter should only be adjusted whilst the pipe is disabled,
4294          * according to register description and PRM.
4295          */
4296         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4297         assert_pipe_disabled(dev_priv, crtc->pipe);
4298
4299         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4300         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4301
4302         /* Border color in case we don't scale up to the full screen. Black by
4303          * default, change to something else for debugging. */
4304         I915_WRITE(BCLRPAT(crtc->pipe), 0);
4305 }
4306
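/* Iterate over every power domain set in @mask. */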
4307 #define for_each_power_domain(domain, mask)                             \
4308         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
4309                 if ((1 << (domain)) & (mask))
4310
4311 enum intel_display_power_domain
4312 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4313 {
4314         struct drm_device *dev = intel_encoder->base.dev;
4315         struct intel_digital_port *intel_dig_port;
4316
4317         switch (intel_encoder->type) {
4318         case INTEL_OUTPUT_UNKNOWN:
4319                 /* Only DDI platforms should ever use this output type */
4320                 WARN_ON_ONCE(!HAS_DDI(dev));
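                /* fall through */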
4321         case INTEL_OUTPUT_DISPLAYPORT:
4322         case INTEL_OUTPUT_HDMI:
4323         case INTEL_OUTPUT_EDP:
4324                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4325                 switch (intel_dig_port->port) {
4326                 case PORT_A:
4327                         return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4328                 case PORT_B:
4329                         return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4330                 case PORT_C:
4331                         return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4332                 case PORT_D:
4333                         return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4334                 default:
4335                         WARN_ON_ONCE(1);
4336                         return POWER_DOMAIN_PORT_OTHER;
4337                 }
4338         case INTEL_OUTPUT_ANALOG:
4339                 return POWER_DOMAIN_PORT_CRT;
4340         case INTEL_OUTPUT_DSI:
4341                 return POWER_DOMAIN_PORT_DSI;
4342         default:
4343                 return POWER_DOMAIN_PORT_OTHER;
4344         }
4345 }
4346
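/*
 * Collect the power domains the crtc needs in its current configuration:
 * its pipe, its cpu transcoder, the panel fitter when the pch pfit is in
 * use (or forced on via pch_pfit.force_thru), and the port domain of each
 * encoder attached to it. Returned as a bitmask over
 * enum intel_display_power_domain.
 */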
4347 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4348 {
4349         struct drm_device *dev = crtc->dev;
4350         struct intel_encoder *intel_encoder;
4351         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4352         enum pipe pipe = intel_crtc->pipe;
4353         unsigned long mask;
4354         enum transcoder transcoder;
4355
4356         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4357
4358         mask = BIT(POWER_DOMAIN_PIPE(pipe));
4359         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4360         if (intel_crtc->config.pch_pfit.enabled ||
4361             intel_crtc->config.pch_pfit.force_thru)
4362                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4363
4364         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4365                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4366
4367         return mask;
4368 }
4369
4370 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4371                                   bool enable)
4372 {
4373         if (dev_priv->power_domains.init_power_on == enable)
4374                 return;
4375
4376         if (enable)
4377                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4378         else
4379                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4380
4381         dev_priv->power_domains.init_power_on = enable;
4382 }
4383
4384 static void modeset_update_crtc_power_domains(struct drm_device *dev)
4385 {
4386         struct drm_i915_private *dev_priv = dev->dev_private;
4387         unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4388         struct intel_crtc *crtc;
4389
4390         /*
4391          * First get all needed power domains, then put all unneeded, to avoid
4392          * any unnecessary toggling of the power wells.
4393          */
4394         for_each_intel_crtc(dev, crtc) {
4395                 enum intel_display_power_domain domain;
4396
4397                 if (!crtc->base.enabled)
4398                         continue;
4399
4400                 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4401
4402                 for_each_power_domain(domain, pipe_domains[crtc->pipe])
4403                         intel_display_power_get(dev_priv, domain);
4404         }
4405
4406         for_each_intel_crtc(dev, crtc) {
4407                 enum intel_display_power_domain domain;
4408
4409                 for_each_power_domain(domain, crtc->enabled_power_domains)
4410                         intel_display_power_put(dev_priv, domain);
4411
4412                 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4413         }
4414
4415         intel_display_set_init_power(dev_priv, false);
4416 }
4417
4418 /* returns HPLL frequency in kHz */
4419 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4420 {
4421         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4422
4423         /* Obtain SKU information */
4424         mutex_lock(&dev_priv->dpio_lock);
4425         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4426                 CCK_FUSE_HPLL_FREQ_MASK;
4427         mutex_unlock(&dev_priv->dpio_lock);
4428
4429         return vco_freq[hpll_freq] * 1000;
4430 }
4431
4432 static void vlv_update_cdclk(struct drm_device *dev)
4433 {
4434         struct drm_i915_private *dev_priv = dev->dev_private;
4435
4436         dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4437         DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
4438                          dev_priv->vlv_cdclk_freq);
4439
4440         /*
4441          * Program the gmbus_freq based on the cdclk frequency.
4442          * BSpec erroneously claims we should aim for 4MHz, but
4443          * in fact 1MHz is the correct frequency.
4444          */
4445         I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
4446 }
4447
4448 /* Adjust CDclk dividers to allow high res or save power if possible */
4449 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4450 {
4451         struct drm_i915_private *dev_priv = dev->dev_private;
4452         u32 val, cmd;
4453
4454         WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4455
4456         if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4457                 cmd = 2;
4458         else if (cdclk == 266667)
4459                 cmd = 1;
4460         else
4461                 cmd = 0;
4462
4463         mutex_lock(&dev_priv->rps.hw_lock);
4464         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4465         val &= ~DSPFREQGUAR_MASK;
4466         val |= (cmd << DSPFREQGUAR_SHIFT);
4467         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4468         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4469                       DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4470                      50)) {
4471                 DRM_ERROR("timed out waiting for CDclk change\n");
4472         }
4473         mutex_unlock(&dev_priv->rps.hw_lock);
4474
4475         if (cdclk == 400000) {
4476                 u32 divider, vco;
4477
4478                 vco = valleyview_get_vco(dev_priv);
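                /*
                 * cdclk = 2 * vco / (divider + 1); e.g. with an 800 MHz HPLL
                 * (vco == 800000 kHz), hitting 400 MHz needs divider + 1 == 4,
                 * i.e. divider == 3.
                 */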
4479                 divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
4480
4481                 mutex_lock(&dev_priv->dpio_lock);
4482                 /* adjust cdclk divider */
4483                 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4484                 val &= ~DISPLAY_FREQUENCY_VALUES;
4485                 val |= divider;
4486                 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4487
4488                 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4489                               DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4490                              50))
4491                         DRM_ERROR("timed out waiting for CDclk change\n");
4492                 mutex_unlock(&dev_priv->dpio_lock);
4493         }
4494
4495         mutex_lock(&dev_priv->dpio_lock);
4496         /* adjust self-refresh exit latency value */
4497         val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4498         val &= ~0x7f;
4499
4500         /*
4501          * For high bandwidth configs, we set a higher latency in the bunit
4502          * so that the core display fetch happens in time to avoid underruns.
4503          */
4504         if (cdclk == 400000)
4505                 val |= 4500 / 250; /* 4.5 usec */
4506         else
4507                 val |= 3000 / 250; /* 3.0 usec */
4508         vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4509         mutex_unlock(&dev_priv->dpio_lock);
4510
4511         vlv_update_cdclk(dev);
4512 }
4513
4514 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4515                                  int max_pixclk)
4516 {
4517         int vco = valleyview_get_vco(dev_priv);
4518         int freq_320 = (vco <<  1) % 320000 != 0 ? 333333 : 320000;
4519
4520         /*
4521          * Really only a few cases to deal with, as only 4 CDclks are supported:
4522          *   200MHz
4523          *   267MHz
4524          *   320/333MHz (depends on HPLL freq)
4525          *   400MHz
4526          * So we check to see whether we're above 90% of the lower bin and
4527          * adjust if needed.
4528          *
4529          * We seem to get an unstable or solid color picture at 200MHz.
4530          * Not sure what's wrong. For now use 200MHz only when all pipes
4531          * are off.
4532          */
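        /*
         * e.g. a 250 MHz max pixel clock is above 90% of 266.67 MHz but
         * below 90% of the 320/333 MHz bin, so freq_320 is chosen.
         */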
4533         if (max_pixclk > freq_320*9/10)
4534                 return 400000;
4535         else if (max_pixclk > 266667*9/10)
4536                 return freq_320;
4537         else if (max_pixclk > 0)
4538                 return 266667;
4539         else
4540                 return 200000;
4541 }
4542
4543 /* compute the max pixel clock for new configuration */
4544 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4545 {
4546         struct drm_device *dev = dev_priv->dev;
4547         struct intel_crtc *intel_crtc;
4548         int max_pixclk = 0;
4549
4550         for_each_intel_crtc(dev, intel_crtc) {
4551                 if (intel_crtc->new_enabled)
4552                         max_pixclk = max(max_pixclk,
4553                                          intel_crtc->new_config->adjusted_mode.crtc_clock);
4554         }
4555
4556         return max_pixclk;
4557 }
4558
4559 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4560                                             unsigned *prepare_pipes)
4561 {
4562         struct drm_i915_private *dev_priv = dev->dev_private;
4563         struct intel_crtc *intel_crtc;
4564         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4565
4566         if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4567             dev_priv->vlv_cdclk_freq)
4568                 return;
4569
4570         /* disable/enable all currently active pipes while we change cdclk */
4571         for_each_intel_crtc(dev, intel_crtc)
4572                 if (intel_crtc->base.enabled)
4573                         *prepare_pipes |= (1 << intel_crtc->pipe);
4574 }
4575
4576 static void valleyview_modeset_global_resources(struct drm_device *dev)
4577 {
4578         struct drm_i915_private *dev_priv = dev->dev_private;
4579         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4580         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4581
4582         if (req_cdclk != dev_priv->vlv_cdclk_freq)
4583                 valleyview_set_cdclk(dev, req_cdclk);
4584         modeset_update_crtc_power_domains(dev);
4585 }
4586
4587 static void valleyview_crtc_enable(struct drm_crtc *crtc)
4588 {
4589         struct drm_device *dev = crtc->dev;
4590         struct drm_i915_private *dev_priv = dev->dev_private;
4591         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4592         struct intel_encoder *encoder;
4593         int pipe = intel_crtc->pipe;
4594         int plane = intel_crtc->plane;
4595         bool is_dsi;
4596         u32 dspcntr;
4597
4598         WARN_ON(!crtc->enabled);
4599
4600         if (intel_crtc->active)
4601                 return;
4602
4603         is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4604
4605         if (!is_dsi && !IS_CHERRYVIEW(dev))
4606                 vlv_prepare_pll(intel_crtc);
4607
4608         /* Set up the display plane register */
4609         dspcntr = DISPPLANE_GAMMA_ENABLE;
4610
4611         if (intel_crtc->config.has_dp_encoder)
4612                 intel_dp_set_m_n(intel_crtc);
4613
4614         intel_set_pipe_timings(intel_crtc);
4615
4616         /* pipesrc and dspsize control the size that is scaled from,
4617          * which should always be the user's requested size.
4618          */
4619         I915_WRITE(DSPSIZE(plane),
4620                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4621                    (intel_crtc->config.pipe_src_w - 1));
4622         I915_WRITE(DSPPOS(plane), 0);
4623
4624         i9xx_set_pipeconf(intel_crtc);
4625
4626         I915_WRITE(DSPCNTR(plane), dspcntr);
4627         POSTING_READ(DSPCNTR(plane));
4628
4629         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4630                                                crtc->x, crtc->y);
4631
4632         intel_crtc->active = true;
4633
4634         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4635
4636         for_each_encoder_on_crtc(dev, crtc, encoder)
4637                 if (encoder->pre_pll_enable)
4638                         encoder->pre_pll_enable(encoder);
4639
4640         if (!is_dsi) {
4641                 if (IS_CHERRYVIEW(dev))
4642                         chv_enable_pll(intel_crtc);
4643                 else
4644                         vlv_enable_pll(intel_crtc);
4645         }
4646
4647         for_each_encoder_on_crtc(dev, crtc, encoder)
4648                 if (encoder->pre_enable)
4649                         encoder->pre_enable(encoder);
4650
4651         i9xx_pfit_enable(intel_crtc);
4652
4653         intel_crtc_load_lut(crtc);
4654
4655         intel_update_watermarks(crtc);
4656         intel_enable_pipe(intel_crtc);
4657
4658         for_each_encoder_on_crtc(dev, crtc, encoder)
4659                 encoder->enable(encoder);
4660
4661         intel_crtc_enable_planes(crtc);
4662
4663         /* Underruns don't raise interrupts, so check manually. */
4664         i9xx_check_fifo_underruns(dev);
4665 }
4666
4667 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4668 {
4669         struct drm_device *dev = crtc->base.dev;
4670         struct drm_i915_private *dev_priv = dev->dev_private;
4671
4672         I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4673         I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4674 }
4675
4676 static void i9xx_crtc_enable(struct drm_crtc *crtc)
4677 {
4678         struct drm_device *dev = crtc->dev;
4679         struct drm_i915_private *dev_priv = dev->dev_private;
4680         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4681         struct intel_encoder *encoder;
4682         int pipe = intel_crtc->pipe;
4683         int plane = intel_crtc->plane;
4684         u32 dspcntr;
4685
4686         WARN_ON(!crtc->enabled);
4687
4688         if (intel_crtc->active)
4689                 return;
4690
4691         i9xx_set_pll_dividers(intel_crtc);
4692
4693         /* Set up the display plane register */
4694         dspcntr = DISPPLANE_GAMMA_ENABLE;
4695
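        /* Route the primary plane to this crtc's pipe via the plane's pipe
         * select bits. */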
4696         if (pipe == 0)
4697                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4698         else
4699                 dspcntr |= DISPPLANE_SEL_PIPE_B;
4700
4701         if (intel_crtc->config.has_dp_encoder)
4702                 intel_dp_set_m_n(intel_crtc);
4703
4704         intel_set_pipe_timings(intel_crtc);
4705
4706         /* pipesrc and dspsize control the size that is scaled from,
4707          * which should always be the user's requested size.
4708          */
4709         I915_WRITE(DSPSIZE(plane),
4710                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4711                    (intel_crtc->config.pipe_src_w - 1));
4712         I915_WRITE(DSPPOS(plane), 0);
4713
4714         i9xx_set_pipeconf(intel_crtc);
4715
4716         I915_WRITE(DSPCNTR(plane), dspcntr);
4717         POSTING_READ(DSPCNTR(plane));
4718
4719         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4720                                                crtc->x, crtc->y);
4721
4722         intel_crtc->active = true;
4723
4724         if (!IS_GEN2(dev))
4725                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4726
4727         for_each_encoder_on_crtc(dev, crtc, encoder)
4728                 if (encoder->pre_enable)
4729                         encoder->pre_enable(encoder);
4730
4731         i9xx_enable_pll(intel_crtc);
4732
4733         i9xx_pfit_enable(intel_crtc);
4734
4735         intel_crtc_load_lut(crtc);
4736
4737         intel_update_watermarks(crtc);
4738         intel_enable_pipe(intel_crtc);
4739
4740         for_each_encoder_on_crtc(dev, crtc, encoder)
4741                 encoder->enable(encoder);
4742
4743         intel_crtc_enable_planes(crtc);
4744
4745         /*
4746          * Gen2 reports pipe underruns whenever all planes are disabled.
4747          * So don't enable underrun reporting before at least some planes
4748          * are enabled.
4749          * FIXME: Need to fix the logic to work when we turn off all planes
4750          * but leave the pipe running.
4751          */
4752         if (IS_GEN2(dev))
4753                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4754
4755         /* Underruns don't raise interrupts, so check manually. */
4756         i9xx_check_fifo_underruns(dev);
4757 }
4758
4759 static void i9xx_pfit_disable(struct intel_crtc *crtc)
4760 {
4761         struct drm_device *dev = crtc->base.dev;
4762         struct drm_i915_private *dev_priv = dev->dev_private;
4763
4764         if (!crtc->config.gmch_pfit.control)
4765                 return;
4766
4767         assert_pipe_disabled(dev_priv, crtc->pipe);
4768
4769         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4770                          I915_READ(PFIT_CONTROL));
4771         I915_WRITE(PFIT_CONTROL, 0);
4772 }
4773
4774 static void i9xx_crtc_disable(struct drm_crtc *crtc)
4775 {
4776         struct drm_device *dev = crtc->dev;
4777         struct drm_i915_private *dev_priv = dev->dev_private;
4778         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4779         struct intel_encoder *encoder;
4780         int pipe = intel_crtc->pipe;
4781
4782         if (!intel_crtc->active)
4783                 return;
4784
4785         /*
4786          * Gen2 reports pipe underruns whenever all planes are disabled.
4787          * So disable underrun reporting before all the planes get disabled.
4788          * FIXME: Need to fix the logic to work when we turn off all planes
4789          * but leave the pipe running.
4790          */
4791         if (IS_GEN2(dev))
4792                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4793
4794         /*
4795          * Vblank time updates from the shadow to live plane control register
4796          * are blocked if the memory self-refresh mode is active at that
4797          * moment. So to make sure the plane gets truly disabled, first
4798          * disable the self-refresh mode. The self-refresh enable bit in turn
4799          * will be checked/applied by the HW only at the next frame start
4800          * event which is after the vblank start event, so we need to have a
4801          * wait-for-vblank between disabling the plane and the pipe.
4802          */
4803         intel_set_memory_cxsr(dev_priv, false);
4804         intel_crtc_disable_planes(crtc);
4805
4806         for_each_encoder_on_crtc(dev, crtc, encoder)
4807                 encoder->disable(encoder);
4808
4809         /*
4810          * On gen2 planes are double buffered but the pipe isn't, so we must
4811          * wait for planes to fully turn off before disabling the pipe.
4812          * We also need to wait on all gmch platforms because of the
4813          * self-refresh mode constraint explained above.
4814          */
4815         intel_wait_for_vblank(dev, pipe);
4816
4817         intel_disable_pipe(dev_priv, pipe);
4818
4819         i9xx_pfit_disable(intel_crtc);
4820
4821         for_each_encoder_on_crtc(dev, crtc, encoder)
4822                 if (encoder->post_disable)
4823                         encoder->post_disable(encoder);
4824
4825         if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4826                 if (IS_CHERRYVIEW(dev))
4827                         chv_disable_pll(dev_priv, pipe);
4828                 else if (IS_VALLEYVIEW(dev))
4829                         vlv_disable_pll(dev_priv, pipe);
4830                 else
4831                         i9xx_disable_pll(dev_priv, pipe);
4832         }
4833
4834         if (!IS_GEN2(dev))
4835                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4836
4837         intel_crtc->active = false;
4838         intel_update_watermarks(crtc);
4839
4840         mutex_lock(&dev->struct_mutex);
4841         intel_update_fbc(dev);
4842         mutex_unlock(&dev->struct_mutex);
4843 }
4844
4845 static void i9xx_crtc_off(struct drm_crtc *crtc)
4846 {
4847 }
4848
4849 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4850                                     bool enabled)
4851 {
4852         struct drm_device *dev = crtc->dev;
4853         struct drm_i915_master_private *master_priv;
4854         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4855         int pipe = intel_crtc->pipe;
4856
4857         if (!dev->primary->master)
4858                 return;
4859
4860         master_priv = dev->primary->master->driver_priv;
4861         if (!master_priv->sarea_priv)
4862                 return;
4863
4864         switch (pipe) {
4865         case 0:
4866                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4867                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4868                 break;
4869         case 1:
4870                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4871                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4872                 break;
4873         default:
4874                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4875                 break;
4876         }
4877 }
4878
4879 /**
4880  * intel_crtc_update_dpms - set the power management mode of the pipe and plane
4881  */
4882 void intel_crtc_update_dpms(struct drm_crtc *crtc)
4883 {
4884         struct drm_device *dev = crtc->dev;
4885         struct drm_i915_private *dev_priv = dev->dev_private;
4886         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4887         struct intel_encoder *intel_encoder;
4888         enum intel_display_power_domain domain;
4889         unsigned long domains;
4890         bool enable = false;
4891
4892         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4893                 enable |= intel_encoder->connectors_active;
4894
4895         if (enable) {
4896                 if (!intel_crtc->active) {
4897                         /*
4898                          * FIXME: DDI plls and the relevant code aren't converted
4899                          * yet, so for now only do runtime PM for DPMS on
4900                          * non-DDI platforms.
4901                          */
4902                         if (!HAS_DDI(dev)) {
4903                                 domains = get_crtc_power_domains(crtc);
4904                                 for_each_power_domain(domain, domains)
4905                                         intel_display_power_get(dev_priv, domain);
4906                                 intel_crtc->enabled_power_domains = domains;
4907                         }
4908
4909                         dev_priv->display.crtc_enable(crtc);
4910                 }
4911         } else {
4912                 if (intel_crtc->active) {
4913                         dev_priv->display.crtc_disable(crtc);
4914
4915                         if (!HAS_DDI(dev)) {
4916                                 domains = intel_crtc->enabled_power_domains;
4917                                 for_each_power_domain(domain, domains)
4918                                         intel_display_power_put(dev_priv, domain);
4919                                 intel_crtc->enabled_power_domains = 0;
4920                         }
4921                 }
4922         }
4923
4924         intel_crtc_update_sarea(crtc, enable);
4925 }
4926
4927 static void intel_crtc_disable(struct drm_crtc *crtc)
4928 {
4929         struct drm_device *dev = crtc->dev;
4930         struct drm_connector *connector;
4931         struct drm_i915_private *dev_priv = dev->dev_private;
4932         struct drm_i915_gem_object *old_obj;
4933         enum pipe pipe = to_intel_crtc(crtc)->pipe;
4934
4935         /* crtc should still be enabled when we disable it. */
4936         WARN_ON(!crtc->enabled);
4937
4938         dev_priv->display.crtc_disable(crtc);
4939         intel_crtc_update_sarea(crtc, false);
4940         dev_priv->display.off(crtc);
4941
4942         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4943         assert_cursor_disabled(dev_priv, pipe);
4944         assert_pipe_disabled(dev->dev_private, pipe);
4945
4946         if (crtc->primary->fb) {
4947                 old_obj = to_intel_framebuffer(crtc->primary->fb)->obj;
4948                 mutex_lock(&dev->struct_mutex);
4949                 intel_unpin_fb_obj(old_obj);
4950                 i915_gem_track_fb(old_obj, NULL,
4951                                   INTEL_FRONTBUFFER_PRIMARY(pipe));
4952                 mutex_unlock(&dev->struct_mutex);
4953                 crtc->primary->fb = NULL;
4954         }
4955
4956         /* Update computed state. */
4957         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4958                 if (!connector->encoder || !connector->encoder->crtc)
4959                         continue;
4960
4961                 if (connector->encoder->crtc != crtc)
4962                         continue;
4963
4964                 connector->dpms = DRM_MODE_DPMS_OFF;
4965                 to_intel_encoder(connector->encoder)->connectors_active = false;
4966         }
4967 }
4968
4969 void intel_encoder_destroy(struct drm_encoder *encoder)
4970 {
4971         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4972
4973         drm_encoder_cleanup(encoder);
4974         kfree(intel_encoder);
4975 }
4976
4977 /* Simple dpms helper for encoders with just one connector, no cloning and only
4978  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4979  * state of the entire output pipe. */
4980 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4981 {
4982         if (mode == DRM_MODE_DPMS_ON) {
4983                 encoder->connectors_active = true;
4984
4985                 intel_crtc_update_dpms(encoder->base.crtc);
4986         } else {
4987                 encoder->connectors_active = false;
4988
4989                 intel_crtc_update_dpms(encoder->base.crtc);
4990         }
4991 }
4992
4993 /* Cross check the actual hw state with our own modeset state tracking (and its
4994  * internal consistency). */
4995 static void intel_connector_check_state(struct intel_connector *connector)
4996 {
4997         if (connector->get_hw_state(connector)) {
4998                 struct intel_encoder *encoder = connector->encoder;
4999                 struct drm_crtc *crtc;
5000                 bool encoder_enabled;
5001                 enum pipe pipe;
5002
5003                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5004                               connector->base.base.id,
5005                               connector->base.name);
5006
5007                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5008                      "wrong connector dpms state\n");
5009                 WARN(connector->base.encoder != &encoder->base,
5010                      "active connector not linked to encoder\n");
5011                 WARN(!encoder->connectors_active,
5012                      "encoder->connectors_active not set\n");
5013
5014                 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5015                 WARN(!encoder_enabled, "encoder not enabled\n");
5016                 if (WARN_ON(!encoder->base.crtc))
5017                         return;
5018
5019                 crtc = encoder->base.crtc;
5020
5021                 WARN(!crtc->enabled, "crtc not enabled\n");
5022                 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5023                 WARN(pipe != to_intel_crtc(crtc)->pipe,
5024                      "encoder active on the wrong pipe\n");
5025         }
5026 }
5027
5028 /* Even simpler default implementation, if there's really no special case to
5029  * consider. */
5030 void intel_connector_dpms(struct drm_connector *connector, int mode)
5031 {
5032         /* All the simple cases only support two dpms states. */
5033         if (mode != DRM_MODE_DPMS_ON)
5034                 mode = DRM_MODE_DPMS_OFF;
5035
5036         if (mode == connector->dpms)
5037                 return;
5038
5039         connector->dpms = mode;
5040
5041         /* Only need to change hw state when actually enabled */
5042         if (connector->encoder)
5043                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5044
5045         intel_modeset_check_state(connector->dev);
5046 }
5047
5048 /* Simple connector->get_hw_state implementation for encoders that support only
5049  * one connector and no cloning and hence the encoder state determines the state
5050  * of the connector. */
5051 bool intel_connector_get_hw_state(struct intel_connector *connector)
5052 {
5053         enum pipe pipe = 0;
5054         struct intel_encoder *encoder = connector->encoder;
5055
5056         return encoder->get_hw_state(encoder, &pipe);
5057 }
5058
5059 static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5060                                      struct intel_crtc_config *pipe_config)
5061 {
5062         struct drm_i915_private *dev_priv = dev->dev_private;
5063         struct intel_crtc *pipe_B_crtc =
5064                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5065
5066         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5067                       pipe_name(pipe), pipe_config->fdi_lanes);
5068         if (pipe_config->fdi_lanes > 4) {
5069                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5070                               pipe_name(pipe), pipe_config->fdi_lanes);
5071                 return false;
5072         }
5073
5074         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5075                 if (pipe_config->fdi_lanes > 2) {
5076                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5077                                       pipe_config->fdi_lanes);
5078                         return false;
5079                 } else {
5080                         return true;
5081                 }
5082         }
5083
5084         if (INTEL_INFO(dev)->num_pipes == 2)
5085                 return true;
5086
5087         /* Ivybridge 3 pipe is really complicated */
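        /*
         * Pipes B and C share the FDI link to the PCH: pipe C is limited to
         * 2 lanes, and only gets them if pipe B's PCH output (if any) also
         * uses 2 or fewer.
         */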
5088         switch (pipe) {
5089         case PIPE_A:
5090                 return true;
5091         case PIPE_B:
5092                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5093                     pipe_config->fdi_lanes > 2) {
5094                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5095                                       pipe_name(pipe), pipe_config->fdi_lanes);
5096                         return false;
5097                 }
5098                 return true;
5099         case PIPE_C:
5100                 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5101                     pipe_B_crtc->config.fdi_lanes <= 2) {
5102                         if (pipe_config->fdi_lanes > 2) {
5103                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5104                                               pipe_name(pipe), pipe_config->fdi_lanes);
5105                                 return false;
5106                         }
5107                 } else {
5108                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5109                         return false;
5110                 }
5111                 return true;
5112         default:
5113                 BUG();
5114         }
5115 }
5116
5117 #define RETRY 1
5118 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5119                                        struct intel_crtc_config *pipe_config)
5120 {
5121         struct drm_device *dev = intel_crtc->base.dev;
5122         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5123         int lane, link_bw, fdi_dotclock;
5124         bool setup_ok, needs_recompute = false;
5125
5126 retry:
5127         /* FDI is a binary signal running at ~2.7GHz, encoding
5128          * each output octet as 10 bits. The actual frequency
5129          * is stored as a divider into a 100MHz clock, and the
5130          * mode pixel clock is stored in units of 1KHz.
5131          * Hence the bw of each lane in terms of the mode signal
5132          * is:
5133          */
5134         link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
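        /*
         * e.g. a 2.7 GHz link (intel_fdi_link_freq() == 27) gives
         * link_bw = 27 * 100000 / 10 = 270000 kHz of mode clock per lane.
         */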
5135
5136         fdi_dotclock = adjusted_mode->crtc_clock;
5137
5138         lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5139                                            pipe_config->pipe_bpp);
5140
5141         pipe_config->fdi_lanes = lane;
5142
5143         intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5144                                link_bw, &pipe_config->fdi_m_n);
5145
5146         setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5147                                             intel_crtc->pipe, pipe_config);
5148         if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5149                 pipe_config->pipe_bpp -= 2*3;
5150                 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5151                               pipe_config->pipe_bpp);
5152                 needs_recompute = true;
5153                 pipe_config->bw_constrained = true;
5154
5155                 goto retry;
5156         }
5157
5158         if (needs_recompute)
5159                 return RETRY;
5160
5161         return setup_ok ? 0 : -EINVAL;
5162 }
5163
5164 static void hsw_compute_ips_config(struct intel_crtc *crtc,
5165                                    struct intel_crtc_config *pipe_config)
5166 {
5167         pipe_config->ips_enabled = i915.enable_ips &&
5168                                    hsw_crtc_supports_ips(crtc) &&
5169                                    pipe_config->pipe_bpp <= 24;
5170 }
5171
5172 static int intel_crtc_compute_config(struct intel_crtc *crtc,
5173                                      struct intel_crtc_config *pipe_config)
5174 {
5175         struct drm_device *dev = crtc->base.dev;
5176         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5177
5178         /* FIXME should check pixel clock limits on all platforms */
5179         if (INTEL_INFO(dev)->gen < 4) {
5180                 struct drm_i915_private *dev_priv = dev->dev_private;
5181                 int clock_limit =
5182                         dev_priv->display.get_display_clock_speed(dev);
5183
5184                 /*
5185                  * Enable pixel doubling when the dot clock
5186                  * is > 90% of the (display) core speed.
5187                  *
5188                  * GDG double wide on either pipe,
5189                  * otherwise pipe A only.
5190                  */
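                /* e.g. a 200 MHz core clock allows dot clocks up to 180 MHz,
                 * or up to 360 MHz when the pipe runs double wide. */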
5191                 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5192                     adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5193                         clock_limit *= 2;
5194                         pipe_config->double_wide = true;
5195                 }
5196
5197                 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5198                         return -EINVAL;
5199         }
5200
5201         /*
5202          * Pipe horizontal size must be even in:
5203          * - DVO ganged mode
5204          * - LVDS dual channel mode
5205          * - Double wide pipe
5206          */
5207         if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5208              intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5209                 pipe_config->pipe_src_w &= ~1;
5210
5211         /* Cantiga+ cannot handle modes with an hsync front porch of 0.
5212          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5213          */
5214         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5215                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5216                 return -EINVAL;
5217
5218         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5219                 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5220         } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5221                 /* only an 8bpc pipe, with 6bpc dither through the panel fitter
5222                  * for lvds. */
5223                 pipe_config->pipe_bpp = 8*3;
5224         }
5225
5226         if (HAS_IPS(dev))
5227                 hsw_compute_ips_config(crtc, pipe_config);
5228
5229         /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
5230          * clock survives for now. */
5231         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5232                 pipe_config->shared_dpll = crtc->config.shared_dpll;
5233
5234         if (pipe_config->has_pch_encoder)
5235                 return ironlake_fdi_compute_config(crtc, pipe_config);
5236
5237         return 0;
5238 }
5239
5240 static int valleyview_get_display_clock_speed(struct drm_device *dev)
5241 {
5242         struct drm_i915_private *dev_priv = dev->dev_private;
5243         int vco = valleyview_get_vco(dev_priv);
5244         u32 val;
5245         int divider;
5246
5247         mutex_lock(&dev_priv->dpio_lock);
5248         val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5249         mutex_unlock(&dev_priv->dpio_lock);
5250
5251         divider = val & DISPLAY_FREQUENCY_VALUES;
5252
5253         WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5254              (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5255              "cdclk change in progress\n");
5256
5257         return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
5258 }
5259
5260 static int i945_get_display_clock_speed(struct drm_device *dev)
5261 {
5262         return 400000;
5263 }
5264
5265 static int i915_get_display_clock_speed(struct drm_device *dev)
5266 {
5267         return 333000;
5268 }
5269
5270 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5271 {
5272         return 200000;
5273 }
5274
5275 static int pnv_get_display_clock_speed(struct drm_device *dev)
5276 {
5277         u16 gcfgc = 0;
5278
5279         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5280
5281         switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5282         case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5283                 return 267000;
5284         case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5285                 return 333000;
5286         case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5287                 return 444000;
5288         case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5289                 return 200000;
5290         default:
5291                 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
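                /* fall through */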
5292         case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5293                 return 133000;
5294         case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5295                 return 167000;
5296         }
5297 }
5298
5299 static int i915gm_get_display_clock_speed(struct drm_device *dev)
5300 {
5301         u16 gcfgc = 0;
5302
5303         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5304
5305         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5306                 return 133000;
5307         else {
5308                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5309                 case GC_DISPLAY_CLOCK_333_MHZ:
5310                         return 333000;
5311                 default:
5312                 case GC_DISPLAY_CLOCK_190_200_MHZ:
5313                         return 190000;
5314                 }
5315         }
5316 }
5317
5318 static int i865_get_display_clock_speed(struct drm_device *dev)
5319 {
5320         return 266000;
5321 }
5322
5323 static int i855_get_display_clock_speed(struct drm_device *dev)
5324 {
5325         u16 hpllcc = 0;
5326         /* Assume that the hardware is in the high speed state.  This
5327          * should be the default.
5328          */
5329         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5330         case GC_CLOCK_133_200:
5331         case GC_CLOCK_100_200:
5332                 return 200000;
5333         case GC_CLOCK_166_250:
5334                 return 250000;
5335         case GC_CLOCK_100_133:
5336                 return 133000;
5337         }
5338
5339         /* Shouldn't happen */
5340         return 0;
5341 }
5342
5343 static int i830_get_display_clock_speed(struct drm_device *dev)
5344 {
5345         return 133000;
5346 }
5347
5348 static void
5349 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5350 {
5351         while (*num > DATA_LINK_M_N_MASK ||
5352                *den > DATA_LINK_M_N_MASK) {
5353                 *num >>= 1;
5354                 *den >>= 1;
5355         }
5356 }
5357
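/*
 * Pick hardware-representable M/N values: round N up to a power of two
 * (capped at DATA_LINK_N_MAX), rescale M to preserve the ratio, then shift
 * both down until they fit in the M/N register fields.
 */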
5358 static void compute_m_n(unsigned int m, unsigned int n,
5359                         uint32_t *ret_m, uint32_t *ret_n)
5360 {
5361         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5362         *ret_m = div_u64((uint64_t) m * *ret_n, n);
5363         intel_reduce_m_n_ratio(ret_m, ret_n);
5364 }
5365
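/*
 * Compute the data and link M/N values for a DP/FDI link: gmch_m/gmch_n is
 * the ratio of payload bandwidth (bpp * pixel clock) to total link
 * bandwidth (link clock * lanes * 8), link_m/link_n is pixel clock over
 * link clock, and the TU size is fixed at 64.
 */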
5366 void
5367 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5368                        int pixel_clock, int link_clock,
5369                        struct intel_link_m_n *m_n)
5370 {
5371         m_n->tu = 64;
5372
5373         compute_m_n(bits_per_pixel * pixel_clock,
5374                     link_clock * nlanes * 8,
5375                     &m_n->gmch_m, &m_n->gmch_n);
5376
5377         compute_m_n(pixel_clock, link_clock,
5378                     &m_n->link_m, &m_n->link_n);
5379 }
5380
5381 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5382 {
5383         if (i915.panel_use_ssc >= 0)
5384                 return i915.panel_use_ssc != 0;
5385         return dev_priv->vbt.lvds_use_ssc
5386                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5387 }
5388
5389 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5390 {
5391         struct drm_device *dev = crtc->dev;
5392         struct drm_i915_private *dev_priv = dev->dev_private;
5393         int refclk;
5394
5395         if (IS_VALLEYVIEW(dev)) {
5396                 refclk = 100000;
5397         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5398             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5399                 refclk = dev_priv->vbt.lvds_ssc_freq;
5400                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5401         } else if (!IS_GEN2(dev)) {
5402                 refclk = 96000;
5403         } else {
5404                 refclk = 48000;
5405         }
5406
5407         return refclk;
5408 }
5409
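/* Pack the DPLL FP divisors: N in the high word, M1/M2 below. Pineview
 * encodes N as a single shifted bit and has no M1 field. */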
5410 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5411 {
5412         return (1 << dpll->n) << 16 | dpll->m2;
5413 }
5414
5415 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5416 {
5417         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5418 }
5419
5420 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5421                                      intel_clock_t *reduced_clock)
5422 {
5423         struct drm_device *dev = crtc->base.dev;
5424         u32 fp, fp2 = 0;
5425
5426         if (IS_PINEVIEW(dev)) {
5427                 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
5428                 if (reduced_clock)
5429                         fp2 = pnv_dpll_compute_fp(reduced_clock);
5430         } else {
5431                 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
5432                 if (reduced_clock)
5433                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
5434         }
5435
5436         crtc->config.dpll_hw_state.fp0 = fp;
5437
5438         crtc->lowfreq_avail = false;
5439         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5440             reduced_clock && i915.powersave) {
5441                 crtc->config.dpll_hw_state.fp1 = fp2;
5442                 crtc->lowfreq_avail = true;
5443         } else {
5444                 crtc->config.dpll_hw_state.fp1 = fp;
5445         }
5446 }
5447
5448 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5449                 pipe)
5450 {
5451         u32 reg_val;
5452
5453         /*
5454          * PLLB opamp always calibrates to max value of 0x3f, force enable it
5455          * and set it to a reasonable value instead.
5456          */
5457         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5458         reg_val &= 0xffffff00;
5459         reg_val |= 0x00000030;
5460         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5461
5462         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5463         reg_val &= 0x8cffffff;
5464         reg_val = 0x8c000000;
5465         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5466
5467         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5468         reg_val &= 0xffffff00;
5469         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5470
5471         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5472         reg_val &= 0x00ffffff;
5473         reg_val |= 0xb0000000;
5474         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5475 }
5476
5477 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5478                                          struct intel_link_m_n *m_n)
5479 {
5480         struct drm_device *dev = crtc->base.dev;
5481         struct drm_i915_private *dev_priv = dev->dev_private;
5482         int pipe = crtc->pipe;
5483
5484         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5485         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5486         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5487         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5488 }
5489
5490 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5491                                          struct intel_link_m_n *m_n)
5492 {
5493         struct drm_device *dev = crtc->base.dev;
5494         struct drm_i915_private *dev_priv = dev->dev_private;
5495         int pipe = crtc->pipe;
5496         enum transcoder transcoder = crtc->config.cpu_transcoder;
5497
5498         if (INTEL_INFO(dev)->gen >= 5) {
5499                 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5500                 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5501                 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5502                 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5503         } else {
5504                 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5505                 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5506                 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5507                 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5508         }
5509 }
5510
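/* DP M/N values live in the PCH transcoder registers when a PCH encoder is
 * in use, otherwise in the CPU transcoder's. */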
5511 static void intel_dp_set_m_n(struct intel_crtc *crtc)
5512 {
5513         if (crtc->config.has_pch_encoder)
5514                 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5515         else
5516                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5517 }
5518
5519 static void vlv_update_pll(struct intel_crtc *crtc)
5520 {
5521         u32 dpll, dpll_md;
5522
5523         /*
5524          * Enable DPIO clock input. We should never disable the reference
5525          * clock for pipe B, since VGA hotplug / manual detection depends
5526          * on it.
5527          */
5528         dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5529                 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5530         /* We should never disable this, set it here for state tracking */
5531         if (crtc->pipe == PIPE_B)
5532                 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5533         dpll |= DPLL_VCO_ENABLE;
5534         crtc->config.dpll_hw_state.dpll = dpll;
5535
5536         dpll_md = (crtc->config.pixel_multiplier - 1)
5537                 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5538         crtc->config.dpll_hw_state.dpll_md = dpll_md;
5539 }
5540
5541 static void vlv_prepare_pll(struct intel_crtc *crtc)
5542 {
5543         struct drm_device *dev = crtc->base.dev;
5544         struct drm_i915_private *dev_priv = dev->dev_private;
5545         int pipe = crtc->pipe;
5546         u32 mdiv;
5547         u32 bestn, bestm1, bestm2, bestp1, bestp2;
5548         u32 coreclk, reg_val;
5549
5550         mutex_lock(&dev_priv->dpio_lock);
5551
5552         bestn = crtc->config.dpll.n;
5553         bestm1 = crtc->config.dpll.m1;
5554         bestm2 = crtc->config.dpll.m2;
5555         bestp1 = crtc->config.dpll.p1;
5556         bestp2 = crtc->config.dpll.p2;
5557
5558         /* See eDP HDMI DPIO driver vbios notes doc */
5559
5560         /* PLL B needs special handling */
5561         if (pipe == PIPE_B)
5562                 vlv_pllb_recal_opamp(dev_priv, pipe);
5563
5564         /* Set up Tx target for periodic Rcomp update */
5565         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5566
5567         /* Disable target IRef on PLL */
5568         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5569         reg_val &= 0x00ffffff;
5570         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5571
5572         /* Disable fast lock */
5573         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5574
5575         /* Set idtafcrecal before PLL is enabled */
5576         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5577         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5578         mdiv |= ((bestn << DPIO_N_SHIFT));
5579         mdiv |= (1 << DPIO_K_SHIFT);
5580
5581         /*
5582          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5583          * but we don't support that).
5584          * Note: don't use the DAC post divider as it seems unstable.
5585          */
5586         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5587         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5588
5589         mdiv |= DPIO_ENABLE_CALIBRATION;
5590         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5591
5592         /* Set HBR and RBR LPF coefficients */
5593         if (crtc->config.port_clock == 162000 ||
5594             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5595             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5596                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5597                                  0x009f0003);
5598         else
5599                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5600                                  0x00d0000f);
5601
5602         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5603             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5604                 /* Use SSC source */
5605                 if (pipe == PIPE_A)
5606                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5607                                          0x0df40000);
5608                 else
5609                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5610                                          0x0df70000);
5611         } else { /* HDMI or VGA */
5612                 /* Use bend source */
5613                 if (pipe == PIPE_A)
5614                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5615                                          0x0df70000);
5616                 else
5617                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5618                                          0x0df40000);
5619         }
5620
5621         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5622         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5623         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5624             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5625                 coreclk |= 0x01000000;
5626         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5627
5628         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5629         mutex_unlock(&dev_priv->dpio_lock);
5630 }
5631
5632 static void chv_update_pll(struct intel_crtc *crtc)
5633 {
5634         struct drm_device *dev = crtc->base.dev;
5635         struct drm_i915_private *dev_priv = dev->dev_private;
5636         int pipe = crtc->pipe;
5637         int dpll_reg = DPLL(crtc->pipe);
5638         enum dpio_channel port = vlv_pipe_to_channel(pipe);
5639         u32 loopfilter, intcoeff;
5640         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5641         int refclk;
5642
5643         crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5644                 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5645                 DPLL_VCO_ENABLE;
5646         if (pipe != PIPE_A)
5647                 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5648
5649         crtc->config.dpll_hw_state.dpll_md =
5650                 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5651
5652         bestn = crtc->config.dpll.n;
5653         bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5654         bestm1 = crtc->config.dpll.m1;
5655         bestm2 = crtc->config.dpll.m2 >> 22;
5656         bestp1 = crtc->config.dpll.p1;
5657         bestp2 = crtc->config.dpll.p2;
5658
5659         /*
5660          * Enable Refclk and SSC
5661          */
5662         I915_WRITE(dpll_reg,
5663                    crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5664
5665         mutex_lock(&dev_priv->dpio_lock);
5666
5667         /* p1 and p2 divider */
5668         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5669                         5 << DPIO_CHV_S1_DIV_SHIFT |
5670                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5671                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5672                         1 << DPIO_CHV_K_DIV_SHIFT);
5673
5674         /* Feedback post-divider - m2 */
5675         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5676
5677         /* Feedback refclk divider - n and m1 */
5678         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5679                         DPIO_CHV_M1_DIV_BY_2 |
5680                         1 << DPIO_CHV_N_DIV_SHIFT);
5681
5682         /* M2 fraction division */
5683         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5684
5685         /* M2 fraction division enable */
5686         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5687                        DPIO_CHV_FRAC_DIV_EN |
5688                        (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5689
5690         /* Loop filter */
5691         refclk = i9xx_get_refclk(&crtc->base, 0);
5692         loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5693                 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5694         if (refclk == 100000)
5695                 intcoeff = 11;
5696         else if (refclk == 38400)
5697                 intcoeff = 10;
5698         else
5699                 intcoeff = 9;
5700         loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5701         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5702
5703         /* AFC Recal */
5704         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5705                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5706                         DPIO_AFC_RECAL);
5707
5708         mutex_unlock(&dev_priv->dpio_lock);
5709 }
5710
5711 static void i9xx_update_pll(struct intel_crtc *crtc,
5712                             intel_clock_t *reduced_clock,
5713                             int num_connectors)
5714 {
5715         struct drm_device *dev = crtc->base.dev;
5716         struct drm_i915_private *dev_priv = dev->dev_private;
5717         u32 dpll;
5718         bool is_sdvo;
5719         struct dpll *clock = &crtc->config.dpll;
5720
5721         i9xx_update_pll_dividers(crtc, reduced_clock);
5722
5723         is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5724                 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5725
5726         dpll = DPLL_VGA_MODE_DIS;
5727
5728         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5729                 dpll |= DPLLB_MODE_LVDS;
5730         else
5731                 dpll |= DPLLB_MODE_DAC_SERIAL;
5732
5733         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5734                 dpll |= (crtc->config.pixel_multiplier - 1)
5735                         << SDVO_MULTIPLIER_SHIFT_HIRES;
5736         }
5737
5738         if (is_sdvo)
5739                 dpll |= DPLL_SDVO_HIGH_SPEED;
5740
5741         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5742                 dpll |= DPLL_SDVO_HIGH_SPEED;
5743
5744         /* compute bitmask from p1 value */
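        /* e.g. clock->p1 == 3 sets bit 2, i.e. a one-hot 0b100 in the P1 field */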
5745         if (IS_PINEVIEW(dev))
5746                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5747         else {
5748                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5749                 if (IS_G4X(dev) && reduced_clock)
5750                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5751         }
5752         switch (clock->p2) {
5753         case 5:
5754                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5755                 break;
5756         case 7:
5757                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5758                 break;
5759         case 10:
5760                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5761                 break;
5762         case 14:
5763                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5764                 break;
5765         }
5766         if (INTEL_INFO(dev)->gen >= 4)
5767                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5768
5769         if (crtc->config.sdvo_tv_clock)
5770                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5771         else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5772                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5773                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5774         else
5775                 dpll |= PLL_REF_INPUT_DREFCLK;
5776
5777         dpll |= DPLL_VCO_ENABLE;
5778         crtc->config.dpll_hw_state.dpll = dpll;
5779
5780         if (INTEL_INFO(dev)->gen >= 4) {
5781                 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5782                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5783                 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5784         }
5785 }
5786
5787 static void i8xx_update_pll(struct intel_crtc *crtc,
5788                             intel_clock_t *reduced_clock,
5789                             int num_connectors)
5790 {
5791         struct drm_device *dev = crtc->base.dev;
5792         struct drm_i915_private *dev_priv = dev->dev_private;
5793         u32 dpll;
5794         struct dpll *clock = &crtc->config.dpll;
5795
5796         i9xx_update_pll_dividers(crtc, reduced_clock);
5797
5798         dpll = DPLL_VGA_MODE_DIS;
5799
5800         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5801                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5802         } else {
5803                 if (clock->p1 == 2)
5804                         dpll |= PLL_P1_DIVIDE_BY_TWO;
5805                 else
5806                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5807                 if (clock->p2 == 4)
5808                         dpll |= PLL_P2_DIVIDE_BY_4;
5809         }
5810
5811         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5812                 dpll |= DPLL_DVO_2X_MODE;
5813
5814         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5815                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5816                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5817         else
5818                 dpll |= PLL_REF_INPUT_DREFCLK;
5819
5820         dpll |= DPLL_VCO_ENABLE;
5821         crtc->config.dpll_hw_state.dpll = dpll;
5822 }
5823
5824 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5825 {
5826         struct drm_device *dev = intel_crtc->base.dev;
5827         struct drm_i915_private *dev_priv = dev->dev_private;
5828         enum pipe pipe = intel_crtc->pipe;
5829         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5830         struct drm_display_mode *adjusted_mode =
5831                 &intel_crtc->config.adjusted_mode;
5832         uint32_t crtc_vtotal, crtc_vblank_end;
5833         int vsyncshift = 0;
5834
5835         /* We need to be careful not to change the adjusted mode, for otherwise
5836          * the hw state checker will get angry at the mismatch. */
5837         crtc_vtotal = adjusted_mode->crtc_vtotal;
5838         crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5839
5840         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5841                 /* the chip adds 2 halflines automatically */
5842                 crtc_vtotal -= 1;
5843                 crtc_vblank_end -= 1;
5844
5845                 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5846                         vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5847                 else
5848                         vsyncshift = adjusted_mode->crtc_hsync_start -
5849                                 adjusted_mode->crtc_htotal / 2;
5850                 if (vsyncshift < 0)
5851                         vsyncshift += adjusted_mode->crtc_htotal;
5852         }
5853
5854         if (INTEL_INFO(dev)->gen > 3)
5855                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5856
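        /*
         * Each timing register below packs two values: the low 16 bits hold
         * (active/start - 1) and the high 16 bits hold (total/end - 1), as
         * decoded again in intel_get_pipe_timings().
         */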
5857         I915_WRITE(HTOTAL(cpu_transcoder),
5858                    (adjusted_mode->crtc_hdisplay - 1) |
5859                    ((adjusted_mode->crtc_htotal - 1) << 16));
5860         I915_WRITE(HBLANK(cpu_transcoder),
5861                    (adjusted_mode->crtc_hblank_start - 1) |
5862                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5863         I915_WRITE(HSYNC(cpu_transcoder),
5864                    (adjusted_mode->crtc_hsync_start - 1) |
5865                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5866
5867         I915_WRITE(VTOTAL(cpu_transcoder),
5868                    (adjusted_mode->crtc_vdisplay - 1) |
5869                    ((crtc_vtotal - 1) << 16));
5870         I915_WRITE(VBLANK(cpu_transcoder),
5871                    (adjusted_mode->crtc_vblank_start - 1) |
5872                    ((crtc_vblank_end - 1) << 16));
5873         I915_WRITE(VSYNC(cpu_transcoder),
5874                    (adjusted_mode->crtc_vsync_start - 1) |
5875                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5876
5877         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5878          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5879          * documented in the DDI_FUNC_CTL register description, EDP Input Select
5880          * bits. */
5881         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5882             (pipe == PIPE_B || pipe == PIPE_C))
5883                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5884
5885         /* pipesrc controls the size that is scaled from, which should
5886          * always be the user's requested size.
5887          */
5888         I915_WRITE(PIPESRC(pipe),
5889                    ((intel_crtc->config.pipe_src_w - 1) << 16) |
5890                    (intel_crtc->config.pipe_src_h - 1));
5891 }
5892
5893 static void intel_get_pipe_timings(struct intel_crtc *crtc,
5894                                    struct intel_crtc_config *pipe_config)
5895 {
5896         struct drm_device *dev = crtc->base.dev;
5897         struct drm_i915_private *dev_priv = dev->dev_private;
5898         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5899         uint32_t tmp;
5900
5901         tmp = I915_READ(HTOTAL(cpu_transcoder));
5902         pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5903         pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5904         tmp = I915_READ(HBLANK(cpu_transcoder));
5905         pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5906         pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5907         tmp = I915_READ(HSYNC(cpu_transcoder));
5908         pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5909         pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5910
5911         tmp = I915_READ(VTOTAL(cpu_transcoder));
5912         pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5913         pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5914         tmp = I915_READ(VBLANK(cpu_transcoder));
5915         pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5916         pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5917         tmp = I915_READ(VSYNC(cpu_transcoder));
5918         pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5919         pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5920
5921         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5922                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5923                 pipe_config->adjusted_mode.crtc_vtotal += 1;
5924                 pipe_config->adjusted_mode.crtc_vblank_end += 1;
5925         }
5926
5927         tmp = I915_READ(PIPESRC(crtc->pipe));
5928         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5929         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5930
5931         pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5932         pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5933 }
5934
5935 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5936                                  struct intel_crtc_config *pipe_config)
5937 {
5938         mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5939         mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5940         mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5941         mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5942
5943         mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5944         mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5945         mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5946         mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5947
5948         mode->flags = pipe_config->adjusted_mode.flags;
5949
5950         mode->clock = pipe_config->adjusted_mode.crtc_clock;
5951         mode->flags |= pipe_config->adjusted_mode.flags;
5952 }
5953
5954 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5955 {
5956         struct drm_device *dev = intel_crtc->base.dev;
5957         struct drm_i915_private *dev_priv = dev->dev_private;
5958         uint32_t pipeconf;
5959
5960         pipeconf = 0;
5961
5962         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5963             I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5964                 pipeconf |= PIPECONF_ENABLE;
5965
5966         if (intel_crtc->config.double_wide)
5967                 pipeconf |= PIPECONF_DOUBLE_WIDE;
5968
5969         /* only g4x and later have fancy bpc/dither controls */
5970         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5971                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5972                 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5973                         pipeconf |= PIPECONF_DITHER_EN |
5974                                     PIPECONF_DITHER_TYPE_SP;
5975
5976                 switch (intel_crtc->config.pipe_bpp) {
5977                 case 18:
5978                         pipeconf |= PIPECONF_6BPC;
5979                         break;
5980                 case 24:
5981                         pipeconf |= PIPECONF_8BPC;
5982                         break;
5983                 case 30:
5984                         pipeconf |= PIPECONF_10BPC;
5985                         break;
5986                 default:
5987                         /* Case prevented by intel_choose_pipe_bpp_dither. */
5988                         BUG();
5989                 }
5990         }
5991
5992         if (HAS_PIPE_CXSR(dev)) {
5993                 if (intel_crtc->lowfreq_avail) {
5994                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5995                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5996                 } else {
5997                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5998                 }
5999         }
6000
6001         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6002                 if (INTEL_INFO(dev)->gen < 4 ||
6003                     intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
6004                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6005                 else
6006                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6007         } else
6008                 pipeconf |= PIPECONF_PROGRESSIVE;
6009
6010         if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
6011                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6012
6013         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6014         POSTING_READ(PIPECONF(intel_crtc->pipe));
6015 }
6016
6017 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6018                               int x, int y,
6019                               struct drm_framebuffer *fb)
6020 {
6021         struct drm_device *dev = crtc->dev;
6022         struct drm_i915_private *dev_priv = dev->dev_private;
6023         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6024         int refclk, num_connectors = 0;
6025         intel_clock_t clock, reduced_clock;
6026         bool ok, has_reduced_clock = false;
6027         bool is_lvds = false, is_dsi = false;
6028         struct intel_encoder *encoder;
6029         const intel_limit_t *limit;
6030
6031         for_each_encoder_on_crtc(dev, crtc, encoder) {
6032                 switch (encoder->type) {
6033                 case INTEL_OUTPUT_LVDS:
6034                         is_lvds = true;
6035                         break;
6036                 case INTEL_OUTPUT_DSI:
6037                         is_dsi = true;
6038                         break;
6039                 }
6040
6041                 num_connectors++;
6042         }
6043
6044         if (is_dsi)
6045                 return 0;
6046
6047         if (!intel_crtc->config.clock_set) {
6048                 refclk = i9xx_get_refclk(crtc, num_connectors);
6049
6050                 /*
6051                  * Returns a set of divisors for the desired target clock with
6052                  * the given refclk, or FALSE.  The returned values represent
6053                  * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6054                  * 2) / p1 / p2.
6055                  */
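                /*
                 * Worked example with purely illustrative divisor values (not
                 * taken from any real mode): refclk = 96000 kHz, m1 = 10,
                 * m2 = 10, n = 4, p1 = 2, p2 = 4 gives
                 * 96000 * (5 * 12 + 12) / 6 / 2 / 4 = 144000 kHz.
                 */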
6056                 limit = intel_limit(crtc, refclk);
6057                 ok = dev_priv->display.find_dpll(limit, crtc,
6058                                                  intel_crtc->config.port_clock,
6059                                                  refclk, NULL, &clock);
6060                 if (!ok) {
6061                         DRM_ERROR("Couldn't find PLL settings for mode!\n");
6062                         return -EINVAL;
6063                 }
6064
6065                 if (is_lvds && dev_priv->lvds_downclock_avail) {
6066                         /*
6067                          * Ensure we match the reduced clock's P to the target
6068                          * clock.  If the clocks don't match, we can't switch
6069                          * the display clock by using the FP0/FP1. In such case
6070                          * we will disable the LVDS downclock feature.
6071                          */
6072                         has_reduced_clock =
6073                                 dev_priv->display.find_dpll(limit, crtc,
6074                                                             dev_priv->lvds_downclock,
6075                                                             refclk, &clock,
6076                                                             &reduced_clock);
6077                 }
6078                 /* Compat-code for transition, will disappear. */
6079                 intel_crtc->config.dpll.n = clock.n;
6080                 intel_crtc->config.dpll.m1 = clock.m1;
6081                 intel_crtc->config.dpll.m2 = clock.m2;
6082                 intel_crtc->config.dpll.p1 = clock.p1;
6083                 intel_crtc->config.dpll.p2 = clock.p2;
6084         }
6085
6086         if (IS_GEN2(dev)) {
6087                 i8xx_update_pll(intel_crtc,
6088                                 has_reduced_clock ? &reduced_clock : NULL,
6089                                 num_connectors);
6090         } else if (IS_CHERRYVIEW(dev)) {
6091                 chv_update_pll(intel_crtc);
6092         } else if (IS_VALLEYVIEW(dev)) {
6093                 vlv_update_pll(intel_crtc);
6094         } else {
6095                 i9xx_update_pll(intel_crtc,
6096                                 has_reduced_clock ? &reduced_clock : NULL,
6097                                 num_connectors);
6098         }
6099
6100         return 0;
6101 }
6102
6103 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6104                                  struct intel_crtc_config *pipe_config)
6105 {
6106         struct drm_device *dev = crtc->base.dev;
6107         struct drm_i915_private *dev_priv = dev->dev_private;
6108         uint32_t tmp;
6109
6110         if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6111                 return;
6112
6113         tmp = I915_READ(PFIT_CONTROL);
6114         if (!(tmp & PFIT_ENABLE))
6115                 return;
6116
6117         /* Check whether the pfit is attached to our pipe. */
6118         if (INTEL_INFO(dev)->gen < 4) {
6119                 if (crtc->pipe != PIPE_B)
6120                         return;
6121         } else {
6122                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6123                         return;
6124         }
6125
6126         pipe_config->gmch_pfit.control = tmp;
6127         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6128         if (INTEL_INFO(dev)->gen < 5)
6129                 pipe_config->gmch_pfit.lvds_border_bits =
6130                         I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6131 }
6132
6133 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6134                                struct intel_crtc_config *pipe_config)
6135 {
6136         struct drm_device *dev = crtc->base.dev;
6137         struct drm_i915_private *dev_priv = dev->dev_private;
6138         int pipe = pipe_config->cpu_transcoder;
6139         intel_clock_t clock;
6140         u32 mdiv;
6141         int refclk = 100000;
6142
6143         mutex_lock(&dev_priv->dpio_lock);
6144         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6145         mutex_unlock(&dev_priv->dpio_lock);
6146
6147         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6148         clock.m2 = mdiv & DPIO_M2DIV_MASK;
6149         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6150         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6151         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6152
6153         vlv_clock(refclk, &clock);
6154
6155         /* clock.dot is the fast clock */
6156         pipe_config->port_clock = clock.dot / 5;
6157 }
6158
6159 static void i9xx_get_plane_config(struct intel_crtc *crtc,
6160                                   struct intel_plane_config *plane_config)
6161 {
6162         struct drm_device *dev = crtc->base.dev;
6163         struct drm_i915_private *dev_priv = dev->dev_private;
6164         u32 val, base, offset;
6165         int pipe = crtc->pipe, plane = crtc->plane;
6166         int fourcc, pixel_format;
6167         int aligned_height;
6168
6169         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6170         if (!crtc->base.primary->fb) {
6171                 DRM_DEBUG_KMS("failed to alloc fb\n");
6172                 return;
6173         }
6174
6175         val = I915_READ(DSPCNTR(plane));
6176
6177         if (INTEL_INFO(dev)->gen >= 4)
6178                 if (val & DISPPLANE_TILED)
6179                         plane_config->tiled = true;
6180
6181         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6182         fourcc = intel_format_to_fourcc(pixel_format);
6183         crtc->base.primary->fb->pixel_format = fourcc;
6184         crtc->base.primary->fb->bits_per_pixel =
6185                 drm_format_plane_cpp(fourcc, 0) * 8;
6186
6187         if (INTEL_INFO(dev)->gen >= 4) {
6188                 if (plane_config->tiled)
6189                         offset = I915_READ(DSPTILEOFF(plane));
6190                 else
6191                         offset = I915_READ(DSPLINOFF(plane));
6192                 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6193         } else {
6194                 base = I915_READ(DSPADDR(plane));
6195         }
6196         plane_config->base = base;
6197
6198         val = I915_READ(PIPESRC(pipe));
6199         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6200         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6201
6202         val = I915_READ(DSPSTRIDE(pipe));
6203         crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6204
6205         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6206                                             plane_config->tiled);
6207
6208         plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
6209                                         aligned_height);
6210
6211         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6212                       pipe, plane, crtc->base.primary->fb->width,
6213                       crtc->base.primary->fb->height,
6214                       crtc->base.primary->fb->bits_per_pixel, base,
6215                       crtc->base.primary->fb->pitches[0],
6216                       plane_config->size);
6217
6218 }
6219
6220 static void chv_crtc_clock_get(struct intel_crtc *crtc,
6221                                struct intel_crtc_config *pipe_config)
6222 {
6223         struct drm_device *dev = crtc->base.dev;
6224         struct drm_i915_private *dev_priv = dev->dev_private;
6225         int pipe = pipe_config->cpu_transcoder;
6226         enum dpio_channel port = vlv_pipe_to_channel(pipe);
6227         intel_clock_t clock;
6228         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6229         int refclk = 100000;
6230
6231         mutex_lock(&dev_priv->dpio_lock);
6232         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6233         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6234         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6235         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6236         mutex_unlock(&dev_priv->dpio_lock);
6237
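        /*
         * Decode the dividers programmed by chv_update_pll(): M2's integer
         * part lives in PLL_DW0 and its 22-bit fraction in PLL_DW2, while
         * N, P1, P2 and the M1 divide-by-two come from DW1/CMN_DW13.
         */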
6238         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6239         clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6240         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6241         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6242         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6243
6244         chv_clock(refclk, &clock);
6245
6246         /* clock.dot is the fast clock */
6247         pipe_config->port_clock = clock.dot / 5;
6248 }
6249
6250 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6251                                  struct intel_crtc_config *pipe_config)
6252 {
6253         struct drm_device *dev = crtc->base.dev;
6254         struct drm_i915_private *dev_priv = dev->dev_private;
6255         uint32_t tmp;
6256
6257         if (!intel_display_power_enabled(dev_priv,
6258                                          POWER_DOMAIN_PIPE(crtc->pipe)))
6259                 return false;
6260
6261         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6262         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6263
6264         tmp = I915_READ(PIPECONF(crtc->pipe));
6265         if (!(tmp & PIPECONF_ENABLE))
6266                 return false;
6267
6268         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6269                 switch (tmp & PIPECONF_BPC_MASK) {
6270                 case PIPECONF_6BPC:
6271                         pipe_config->pipe_bpp = 18;
6272                         break;
6273                 case PIPECONF_8BPC:
6274                         pipe_config->pipe_bpp = 24;
6275                         break;
6276                 case PIPECONF_10BPC:
6277                         pipe_config->pipe_bpp = 30;
6278                         break;
6279                 default:
6280                         break;
6281                 }
6282         }
6283
6284         if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6285                 pipe_config->limited_color_range = true;
6286
6287         if (INTEL_INFO(dev)->gen < 4)
6288                 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6289
6290         intel_get_pipe_timings(crtc, pipe_config);
6291
6292         i9xx_get_pfit_config(crtc, pipe_config);
6293
6294         if (INTEL_INFO(dev)->gen >= 4) {
6295                 tmp = I915_READ(DPLL_MD(crtc->pipe));
6296                 pipe_config->pixel_multiplier =
6297                         ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6298                          >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6299                 pipe_config->dpll_hw_state.dpll_md = tmp;
6300         } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6301                 tmp = I915_READ(DPLL(crtc->pipe));
6302                 pipe_config->pixel_multiplier =
6303                         ((tmp & SDVO_MULTIPLIER_MASK)
6304                          >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6305         } else {
6306                 /* Note that on i915G/GM the pixel multiplier is in the sdvo
6307                  * port and will be fixed up in the encoder->get_config
6308                  * function. */
6309                 pipe_config->pixel_multiplier = 1;
6310         }
6311         pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6312         if (!IS_VALLEYVIEW(dev)) {
6313                 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6314                 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6315         } else {
6316                 /* Mask out read-only status bits. */
6317                 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6318                                                      DPLL_PORTC_READY_MASK |
6319                                                      DPLL_PORTB_READY_MASK);
6320         }
6321
6322         if (IS_CHERRYVIEW(dev))
6323                 chv_crtc_clock_get(crtc, pipe_config);
6324         else if (IS_VALLEYVIEW(dev))
6325                 vlv_crtc_clock_get(crtc, pipe_config);
6326         else
6327                 i9xx_crtc_clock_get(crtc, pipe_config);
6328
6329         return true;
6330 }
6331
6332 static void ironlake_init_pch_refclk(struct drm_device *dev)
6333 {
6334         struct drm_i915_private *dev_priv = dev->dev_private;
6335         struct drm_mode_config *mode_config = &dev->mode_config;
6336         struct intel_encoder *encoder;
6337         u32 val, final;
6338         bool has_lvds = false;
6339         bool has_cpu_edp = false;
6340         bool has_panel = false;
6341         bool has_ck505 = false;
6342         bool can_ssc = false;
6343
6344         /* We need to take the global config into account */
6345         list_for_each_entry(encoder, &mode_config->encoder_list,
6346                             base.head) {
6347                 switch (encoder->type) {
6348                 case INTEL_OUTPUT_LVDS:
6349                         has_panel = true;
6350                         has_lvds = true;
6351                         break;
6352                 case INTEL_OUTPUT_EDP:
6353                         has_panel = true;
6354                         if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6355                                 has_cpu_edp = true;
6356                         break;
6357                 }
6358         }
6359
6360         if (HAS_PCH_IBX(dev)) {
6361                 has_ck505 = dev_priv->vbt.display_clock_mode;
6362                 can_ssc = has_ck505;
6363         } else {
6364                 has_ck505 = false;
6365                 can_ssc = true;
6366         }
6367
6368         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6369                       has_panel, has_lvds, has_ck505);
6370
6371         /* Ironlake: try to set up the display reference clock before
6372          * enabling the DPLL. This is only under the driver's control
6373          * after PCH B stepping; earlier steppings should ignore this
6374          * setting.
6375          */
6376         val = I915_READ(PCH_DREF_CONTROL);
6377
6378         /* As we must carefully and slowly disable/enable each source in turn,
6379          * compute the final state we want first and check if we need to
6380          * make any changes at all.
6381          */
6382         final = val;
6383         final &= ~DREF_NONSPREAD_SOURCE_MASK;
6384         if (has_ck505)
6385                 final |= DREF_NONSPREAD_CK505_ENABLE;
6386         else
6387                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
6388
6389         final &= ~DREF_SSC_SOURCE_MASK;
6390         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6391         final &= ~DREF_SSC1_ENABLE;
6392
6393         if (has_panel) {
6394                 final |= DREF_SSC_SOURCE_ENABLE;
6395
6396                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6397                         final |= DREF_SSC1_ENABLE;
6398
6399                 if (has_cpu_edp) {
6400                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
6401                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6402                         else
6403                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6404                 } else
6405                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6406         } else {
6407                 final |= DREF_SSC_SOURCE_DISABLE;
6408                 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6409         }
6410
6411         if (final == val)
6412                 return;
6413
6414         /* Always enable nonspread source */
6415         val &= ~DREF_NONSPREAD_SOURCE_MASK;
6416
6417         if (has_ck505)
6418                 val |= DREF_NONSPREAD_CK505_ENABLE;
6419         else
6420                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
6421
6422         if (has_panel) {
6423                 val &= ~DREF_SSC_SOURCE_MASK;
6424                 val |= DREF_SSC_SOURCE_ENABLE;
6425
6426                 /* SSC must be turned on before enabling the CPU output */
6427                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6428                         DRM_DEBUG_KMS("Using SSC on panel\n");
6429                         val |= DREF_SSC1_ENABLE;
6430                 } else
6431                         val &= ~DREF_SSC1_ENABLE;
6432
6433                 /* Get SSC going before enabling the outputs */
6434                 I915_WRITE(PCH_DREF_CONTROL, val);
6435                 POSTING_READ(PCH_DREF_CONTROL);
6436                 udelay(200);
6437
6438                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6439
6440                 /* Enable CPU source on CPU attached eDP */
6441                 if (has_cpu_edp) {
6442                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6443                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
6444                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6445                         } else
6446                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6447                 } else
6448                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6449
6450                 I915_WRITE(PCH_DREF_CONTROL, val);
6451                 POSTING_READ(PCH_DREF_CONTROL);
6452                 udelay(200);
6453         } else {
6454                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
6455
6456                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6457
6458                 /* Turn off CPU output */
6459                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6460
6461                 I915_WRITE(PCH_DREF_CONTROL, val);
6462                 POSTING_READ(PCH_DREF_CONTROL);
6463                 udelay(200);
6464
6465                 /* Turn off the SSC source */
6466                 val &= ~DREF_SSC_SOURCE_MASK;
6467                 val |= DREF_SSC_SOURCE_DISABLE;
6468
6469                 /* Turn off SSC1 */
6470                 val &= ~DREF_SSC1_ENABLE;
6471
6472                 I915_WRITE(PCH_DREF_CONTROL, val);
6473                 POSTING_READ(PCH_DREF_CONTROL);
6474                 udelay(200);
6475         }
6476
6477         BUG_ON(val != final);
6478 }
6479
6480 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6481 {
6482         uint32_t tmp;
6483
6484         tmp = I915_READ(SOUTH_CHICKEN2);
6485         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6486         I915_WRITE(SOUTH_CHICKEN2, tmp);
6487
6488         if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6489                                FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6490                 DRM_ERROR("FDI mPHY reset assert timeout\n");
6491
6492         tmp = I915_READ(SOUTH_CHICKEN2);
6493         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6494         I915_WRITE(SOUTH_CHICKEN2, tmp);
6495
6496         if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
6497                                 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
6498                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
6499 }
6500
6501 /* WaMPhyProgramming:hsw */
6502 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
6503 {
6504         uint32_t tmp;
6505
6506         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
6507         tmp &= ~(0xFF << 24);
6508         tmp |= (0x12 << 24);
6509         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
6510
6511         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
6512         tmp |= (1 << 11);
6513         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
6514
6515         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
6516         tmp |= (1 << 11);
6517         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
6518
6519         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
6520         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6521         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
6522
6523         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
6524         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
6525         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
6526
6527         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
6528         tmp &= ~(7 << 13);
6529         tmp |= (5 << 13);
6530         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
6531
6532         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
6533         tmp &= ~(7 << 13);
6534         tmp |= (5 << 13);
6535         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
6536
6537         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
6538         tmp &= ~0xFF;
6539         tmp |= 0x1C;
6540         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
6541
6542         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
6543         tmp &= ~0xFF;
6544         tmp |= 0x1C;
6545         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
6546
6547         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
6548         tmp &= ~(0xFF << 16);
6549         tmp |= (0x1C << 16);
6550         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
6551
6552         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
6553         tmp &= ~(0xFF << 16);
6554         tmp |= (0x1C << 16);
6555         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
6556
6557         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
6558         tmp |= (1 << 27);
6559         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
6560
6561         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
6562         tmp |= (1 << 27);
6563         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
6564
6565         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
6566         tmp &= ~(0xF << 28);
6567         tmp |= (4 << 28);
6568         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
6569
6570         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
6571         tmp &= ~(0xF << 28);
6572         tmp |= (4 << 28);
6573         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
6574 }
6575
6576 /* Implements 3 different sequences from BSpec chapter "Display iCLK
6577  * Programming" based on the parameters passed:
6578  * - Sequence to enable CLKOUT_DP
6579  * - Sequence to enable CLKOUT_DP without spread
6580  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
6581  */
6582 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
6583                                  bool with_fdi)
6584 {
6585         struct drm_i915_private *dev_priv = dev->dev_private;
6586         uint32_t reg, tmp;
6587
6588         if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
6589                 with_spread = true;
6590         if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
6591                  with_fdi, "LP PCH doesn't have FDI\n"))
6592                 with_fdi = false;
6593
6594         mutex_lock(&dev_priv->dpio_lock);
6595
6596         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6597         tmp &= ~SBI_SSCCTL_DISABLE;
6598         tmp |= SBI_SSCCTL_PATHALT;
6599         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6600
6601         udelay(24);
6602
6603         if (with_spread) {
6604                 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6605                 tmp &= ~SBI_SSCCTL_PATHALT;
6606                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6607
6608                 if (with_fdi) {
6609                         lpt_reset_fdi_mphy(dev_priv);
6610                         lpt_program_fdi_mphy(dev_priv);
6611                 }
6612         }
6613
6614         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6615                SBI_GEN0 : SBI_DBUFF0;
6616         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6617         tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6618         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6619
6620         mutex_unlock(&dev_priv->dpio_lock);
6621 }
6622
6623 /* Sequence to disable CLKOUT_DP */
6624 static void lpt_disable_clkout_dp(struct drm_device *dev)
6625 {
6626         struct drm_i915_private *dev_priv = dev->dev_private;
6627         uint32_t reg, tmp;
6628
6629         mutex_lock(&dev_priv->dpio_lock);
6630
6631         reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
6632                SBI_GEN0 : SBI_DBUFF0;
6633         tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
6634         tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
6635         intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
6636
6637         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
6638         if (!(tmp & SBI_SSCCTL_DISABLE)) {
6639                 if (!(tmp & SBI_SSCCTL_PATHALT)) {
6640                         tmp |= SBI_SSCCTL_PATHALT;
6641                         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6642                         udelay(32);
6643                 }
6644                 tmp |= SBI_SSCCTL_DISABLE;
6645                 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
6646         }
6647
6648         mutex_unlock(&dev_priv->dpio_lock);
6649 }
6650
6651 static void lpt_init_pch_refclk(struct drm_device *dev)
6652 {
6653         struct drm_mode_config *mode_config = &dev->mode_config;
6654         struct intel_encoder *encoder;
6655         bool has_vga = false;
6656
6657         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
6658                 switch (encoder->type) {
6659                 case INTEL_OUTPUT_ANALOG:
6660                         has_vga = true;
6661                         break;
6662                 }
6663         }
6664
6665         if (has_vga)
6666                 lpt_enable_clkout_dp(dev, true, true);
6667         else
6668                 lpt_disable_clkout_dp(dev);
6669 }
6670
6671 /*
6672  * Initialize reference clocks when the driver loads
6673  */
6674 void intel_init_pch_refclk(struct drm_device *dev)
6675 {
6676         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
6677                 ironlake_init_pch_refclk(dev);
6678         else if (HAS_PCH_LPT(dev))
6679                 lpt_init_pch_refclk(dev);
6680 }
6681
6682 static int ironlake_get_refclk(struct drm_crtc *crtc)
6683 {
6684         struct drm_device *dev = crtc->dev;
6685         struct drm_i915_private *dev_priv = dev->dev_private;
6686         struct intel_encoder *encoder;
6687         int num_connectors = 0;
6688         bool is_lvds = false;
6689
6690         for_each_encoder_on_crtc(dev, crtc, encoder) {
6691                 switch (encoder->type) {
6692                 case INTEL_OUTPUT_LVDS:
6693                         is_lvds = true;
6694                         break;
6695                 }
6696                 num_connectors++;
6697         }
6698
6699         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
6700                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
6701                               dev_priv->vbt.lvds_ssc_freq);
6702                 return dev_priv->vbt.lvds_ssc_freq;
6703         }
6704
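        /* Otherwise fall back to the PCH's fixed 120 MHz reference clock. */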
6705         return 120000;
6706 }
6707
6708 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
6709 {
6710         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
6711         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6712         int pipe = intel_crtc->pipe;
6713         uint32_t val;
6714
6715         val = 0;
6716
6717         switch (intel_crtc->config.pipe_bpp) {
6718         case 18:
6719                 val |= PIPECONF_6BPC;
6720                 break;
6721         case 24:
6722                 val |= PIPECONF_8BPC;
6723                 break;
6724         case 30:
6725                 val |= PIPECONF_10BPC;
6726                 break;
6727         case 36:
6728                 val |= PIPECONF_12BPC;
6729                 break;
6730         default:
6731                 /* Case prevented by intel_choose_pipe_bpp_dither. */
6732                 BUG();
6733         }
6734
6735         if (intel_crtc->config.dither)
6736                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6737
6738         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6739                 val |= PIPECONF_INTERLACED_ILK;
6740         else
6741                 val |= PIPECONF_PROGRESSIVE;
6742
6743         if (intel_crtc->config.limited_color_range)
6744                 val |= PIPECONF_COLOR_RANGE_SELECT;
6745
6746         I915_WRITE(PIPECONF(pipe), val);
6747         POSTING_READ(PIPECONF(pipe));
6748 }
6749
6750 /*
6751  * Set up the pipe CSC unit.
6752  *
6753  * Currently only full range RGB to limited range RGB conversion
6754  * is supported, but eventually this should handle various
6755  * RGB<->YCbCr scenarios as well.
6756  */
6757 static void intel_set_pipe_csc(struct drm_crtc *crtc)
6758 {
6759         struct drm_device *dev = crtc->dev;
6760         struct drm_i915_private *dev_priv = dev->dev_private;
6761         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6762         int pipe = intel_crtc->pipe;
6763         uint16_t coeff = 0x7800; /* 1.0 */
6764
6765         /*
6766          * TODO: Check what kind of values actually come out of the pipe
6767          * with these coeff/postoff values and adjust to get the best
6768          * accuracy. Perhaps we even need to take the bpc value into
6769          * consideration.
6770          */
6771
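        /*
         * 219/255 (~0.859): the scale factor that maps full-range RGB towards
         * the 16..235 limited range; the +16 black-level offset is applied
         * separately below.
         */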
6772         if (intel_crtc->config.limited_color_range)
6773                 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
6774
6775         /*
6776          * GY/GU and RY/RU should be the other way around according
6777          * to BSpec, but reality doesn't agree. Just set them up in
6778          * a way that results in the correct picture.
6779          */
6780         I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
6781         I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
6782
6783         I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
6784         I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
6785
6786         I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
6787         I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
6788
6789         I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
6790         I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
6791         I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
6792
6793         if (INTEL_INFO(dev)->gen > 6) {
6794                 uint16_t postoff = 0;
6795
6796                 if (intel_crtc->config.limited_color_range)
6797                         postoff = (16 * (1 << 12) / 255) & 0x1fff;
6798
6799                 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
6800                 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
6801                 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
6802
6803                 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
6804         } else {
6805                 uint32_t mode = CSC_MODE_YUV_TO_RGB;
6806
6807                 if (intel_crtc->config.limited_color_range)
6808                         mode |= CSC_BLACK_SCREEN_OFFSET;
6809
6810                 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
6811         }
6812 }
6813
6814 static void haswell_set_pipeconf(struct drm_crtc *crtc)
6815 {
6816         struct drm_device *dev = crtc->dev;
6817         struct drm_i915_private *dev_priv = dev->dev_private;
6818         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6819         enum pipe pipe = intel_crtc->pipe;
6820         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6821         uint32_t val;
6822
6823         val = 0;
6824
6825         if (IS_HASWELL(dev) && intel_crtc->config.dither)
6826                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
6827
6828         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
6829                 val |= PIPECONF_INTERLACED_ILK;
6830         else
6831                 val |= PIPECONF_PROGRESSIVE;
6832
6833         I915_WRITE(PIPECONF(cpu_transcoder), val);
6834         POSTING_READ(PIPECONF(cpu_transcoder));
6835
6836         I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
6837         POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
6838
6839         if (IS_BROADWELL(dev)) {
6840                 val = 0;
6841
6842                 switch (intel_crtc->config.pipe_bpp) {
6843                 case 18:
6844                         val |= PIPEMISC_DITHER_6_BPC;
6845                         break;
6846                 case 24:
6847                         val |= PIPEMISC_DITHER_8_BPC;
6848                         break;
6849                 case 30:
6850                         val |= PIPEMISC_DITHER_10_BPC;
6851                         break;
6852                 case 36:
6853                         val |= PIPEMISC_DITHER_12_BPC;
6854                         break;
6855                 default:
6856                         /* Case prevented by pipe_config_set_bpp. */
6857                         BUG();
6858                 }
6859
6860                 if (intel_crtc->config.dither)
6861                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
6862
6863                 I915_WRITE(PIPEMISC(pipe), val);
6864         }
6865 }
6866
6867 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6868                                     intel_clock_t *clock,
6869                                     bool *has_reduced_clock,
6870                                     intel_clock_t *reduced_clock)
6871 {
6872         struct drm_device *dev = crtc->dev;
6873         struct drm_i915_private *dev_priv = dev->dev_private;
6874         struct intel_encoder *intel_encoder;
6875         int refclk;
6876         const intel_limit_t *limit;
6877         bool ret, is_lvds = false;
6878
6879         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6880                 switch (intel_encoder->type) {
6881                 case INTEL_OUTPUT_LVDS:
6882                         is_lvds = true;
6883                         break;
6884                 }
6885         }
6886
6887         refclk = ironlake_get_refclk(crtc);
6888
6889         /*
6890          * Returns a set of divisors for the desired target clock with the given
6891          * refclk, or FALSE.  The returned values represent the clock equation:
6892          * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
6893          */
6894         limit = intel_limit(crtc, refclk);
6895         ret = dev_priv->display.find_dpll(limit, crtc,
6896                                           to_intel_crtc(crtc)->config.port_clock,
6897                                           refclk, NULL, clock);
6898         if (!ret)
6899                 return false;
6900
6901         if (is_lvds && dev_priv->lvds_downclock_avail) {
6902                 /*
6903                  * Ensure we match the reduced clock's P to the target clock.
6904                  * If the clocks don't match, we can't switch the display clock
6905                  * by using the FP0/FP1. In such case we will disable the LVDS
6906                  * downclock feature.
6907                  */
6908                 *has_reduced_clock =
6909                         dev_priv->display.find_dpll(limit, crtc,
6910                                                     dev_priv->lvds_downclock,
6911                                                     refclk, clock,
6912                                                     reduced_clock);
6913         }
6914
6915         return true;
6916 }
6917
6918 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
6919 {
6920         /*
6921          * Account for spread spectrum to avoid
6922          * oversubscribing the link. Max center spread
6923          * is 2.5%; use 5% for safety's sake.
6924          */
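        /*
         * Worked example with illustrative numbers (not from any real mode):
         * a 148500 kHz pixel clock at 24 bpp over a 270000 kHz link gives
         * bps = 148500 * 24 * 21 / 20 = 3742200, so
         * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
         */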
6925         u32 bps = target_clock * bpp * 21 / 20;
6926         return DIV_ROUND_UP(bps, link_bw * 8);
6927 }
6928
6929 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6930 {
6931         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
6932 }
6933
6934 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
6935                                       u32 *fp,
6936                                       intel_clock_t *reduced_clock, u32 *fp2)
6937 {
6938         struct drm_crtc *crtc = &intel_crtc->base;
6939         struct drm_device *dev = crtc->dev;
6940         struct drm_i915_private *dev_priv = dev->dev_private;
6941         struct intel_encoder *intel_encoder;
6942         uint32_t dpll;
6943         int factor, num_connectors = 0;
6944         bool is_lvds = false, is_sdvo = false;
6945
6946         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
6947                 switch (intel_encoder->type) {
6948                 case INTEL_OUTPUT_LVDS:
6949                         is_lvds = true;
6950                         break;
6951                 case INTEL_OUTPUT_SDVO:
6952                 case INTEL_OUTPUT_HDMI:
6953                         is_sdvo = true;
6954                         break;
6955                 }
6956
6957                 num_connectors++;
6958         }
6959
6960         /* Enable autotuning of the PLL clock (if permissible) */
6961         factor = 21;
6962         if (is_lvds) {
6963                 if ((intel_panel_use_ssc(dev_priv) &&
6964                      dev_priv->vbt.lvds_ssc_freq == 100000) ||
6965                     (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
6966                         factor = 25;
6967         } else if (intel_crtc->config.sdvo_tv_clock)
6968                 factor = 20;
6969
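        /*
         * Set FP_CB_TUNE when the feedback divider M is smaller than
         * factor * N (i.e. a low M/N ratio); the reduced clock below gets
         * the same test.
         */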
6970         if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
6971                 *fp |= FP_CB_TUNE;
6972
6973         if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
6974                 *fp2 |= FP_CB_TUNE;
6975
6976         dpll = 0;
6977
6978         if (is_lvds)
6979                 dpll |= DPLLB_MODE_LVDS;
6980         else
6981                 dpll |= DPLLB_MODE_DAC_SERIAL;
6982
6983         dpll |= (intel_crtc->config.pixel_multiplier - 1)
6984                 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
6985
6986         if (is_sdvo)
6987                 dpll |= DPLL_SDVO_HIGH_SPEED;
6988         if (intel_crtc->config.has_dp_encoder)
6989                 dpll |= DPLL_SDVO_HIGH_SPEED;
6990
6991         /* compute bitmask from p1 value */
6992         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
6993         /* also FPA1 */
6994         dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
6995
6996         switch (intel_crtc->config.dpll.p2) {
6997         case 5:
6998                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
6999                 break;
7000         case 7:
7001                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7002                 break;
7003         case 10:
7004                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7005                 break;
7006         case 14:
7007                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7008                 break;
7009         }
7010
7011         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7012                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7013         else
7014                 dpll |= PLL_REF_INPUT_DREFCLK;
7015
7016         return dpll | DPLL_VCO_ENABLE;
7017 }
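/*
 * Illustrative sketch of the divider encoding above (example values only):
 * with dpll.p1 == 2, (1 << (p1 - 1)) sets bit 1, which is shifted into both
 * the FPA01 and FPA1 P1 post-divider fields, and dpll.p2 == 10 selects
 * DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 in the switch statement.
 */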
7018
7019 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
7020                                   int x, int y,
7021                                   struct drm_framebuffer *fb)
7022 {
7023         struct drm_device *dev = crtc->dev;
7024         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7025         int num_connectors = 0;
7026         intel_clock_t clock, reduced_clock;
7027         u32 dpll = 0, fp = 0, fp2 = 0;
7028         bool ok, has_reduced_clock = false;
7029         bool is_lvds = false;
7030         struct intel_encoder *encoder;
7031         struct intel_shared_dpll *pll;
7032
7033         for_each_encoder_on_crtc(dev, crtc, encoder) {
7034                 switch (encoder->type) {
7035                 case INTEL_OUTPUT_LVDS:
7036                         is_lvds = true;
7037                         break;
7038                 }
7039
7040                 num_connectors++;
7041         }
7042
7043         WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7044              "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7045
7046         ok = ironlake_compute_clocks(crtc, &clock,
7047                                      &has_reduced_clock, &reduced_clock);
7048         if (!ok && !intel_crtc->config.clock_set) {
7049                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7050                 return -EINVAL;
7051         }
7052         /* Compat-code for transition, will disappear. */
7053         if (!intel_crtc->config.clock_set) {
7054                 intel_crtc->config.dpll.n = clock.n;
7055                 intel_crtc->config.dpll.m1 = clock.m1;
7056                 intel_crtc->config.dpll.m2 = clock.m2;
7057                 intel_crtc->config.dpll.p1 = clock.p1;
7058                 intel_crtc->config.dpll.p2 = clock.p2;
7059         }
7060
7061         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7062         if (intel_crtc->config.has_pch_encoder) {
7063                 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
7064                 if (has_reduced_clock)
7065                         fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7066
7067                 dpll = ironlake_compute_dpll(intel_crtc,
7068                                              &fp, &reduced_clock,
7069                                              has_reduced_clock ? &fp2 : NULL);
7070
7071                 intel_crtc->config.dpll_hw_state.dpll = dpll;
7072                 intel_crtc->config.dpll_hw_state.fp0 = fp;
7073                 if (has_reduced_clock)
7074                         intel_crtc->config.dpll_hw_state.fp1 = fp2;
7075                 else
7076                         intel_crtc->config.dpll_hw_state.fp1 = fp;
7077
7078                 pll = intel_get_shared_dpll(intel_crtc);
7079                 if (pll == NULL) {
7080                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7081                                          pipe_name(intel_crtc->pipe));
7082                         return -EINVAL;
7083                 }
7084         } else
7085                 intel_put_shared_dpll(intel_crtc);
7086
7087         if (is_lvds && has_reduced_clock && i915.powersave)
7088                 intel_crtc->lowfreq_avail = true;
7089         else
7090                 intel_crtc->lowfreq_avail = false;
7091
7092         return 0;
7093 }
7094
7095 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
7096                                          struct intel_link_m_n *m_n)
7097 {
7098         struct drm_device *dev = crtc->base.dev;
7099         struct drm_i915_private *dev_priv = dev->dev_private;
7100         enum pipe pipe = crtc->pipe;
7101
7102         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
7103         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
7104         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
7105                 & ~TU_SIZE_MASK;
7106         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
7107         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
7108                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7109 }
7110
7111 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
7112                                          enum transcoder transcoder,
7113                                          struct intel_link_m_n *m_n)
7114 {
7115         struct drm_device *dev = crtc->base.dev;
7116         struct drm_i915_private *dev_priv = dev->dev_private;
7117         enum pipe pipe = crtc->pipe;
7118
7119         if (INTEL_INFO(dev)->gen >= 5) {
7120                 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
7121                 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
7122                 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
7123                         & ~TU_SIZE_MASK;
7124                 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
7125                 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
7126                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7127         } else {
7128                 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
7129                 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7130                 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
7131                         & ~TU_SIZE_MASK;
7132                 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
7133                 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
7134                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
7135         }
7136 }
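/*
 * Example of the TU decoding above (illustrative): the hardware field holds
 * the transfer unit size minus one, so a raw TU_SIZE value of 63 read from
 * PIPE_DATA_M1 results in m_n->tu == 64.
 */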
7137
7138 void intel_dp_get_m_n(struct intel_crtc *crtc,
7139                       struct intel_crtc_config *pipe_config)
7140 {
7141         if (crtc->config.has_pch_encoder)
7142                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
7143         else
7144                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7145                                              &pipe_config->dp_m_n);
7146 }
7147
7148 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7149                                         struct intel_crtc_config *pipe_config)
7150 {
7151         intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
7152                                      &pipe_config->fdi_m_n);
7153 }
7154
7155 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7156                                      struct intel_crtc_config *pipe_config)
7157 {
7158         struct drm_device *dev = crtc->base.dev;
7159         struct drm_i915_private *dev_priv = dev->dev_private;
7160         uint32_t tmp;
7161
7162         tmp = I915_READ(PF_CTL(crtc->pipe));
7163
7164         if (tmp & PF_ENABLE) {
7165                 pipe_config->pch_pfit.enabled = true;
7166                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
7167                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
7168
7169                 /* We currently do not free assignments of panel fitters on
7170                  * ivb/hsw (since we don't use the higher upscaling modes which
7171                  * differentiate them), so just WARN about this case for now. */
7172                 if (IS_GEN7(dev)) {
7173                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
7174                                 PF_PIPE_SEL_IVB(crtc->pipe));
7175                 }
7176         }
7177 }
7178
7179 static void ironlake_get_plane_config(struct intel_crtc *crtc,
7180                                       struct intel_plane_config *plane_config)
7181 {
7182         struct drm_device *dev = crtc->base.dev;
7183         struct drm_i915_private *dev_priv = dev->dev_private;
7184         u32 val, base, offset;
7185         int pipe = crtc->pipe, plane = crtc->plane;
7186         int fourcc, pixel_format;
7187         int aligned_height;
7188
7189         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
7190         if (!crtc->base.primary->fb) {
7191                 DRM_DEBUG_KMS("failed to alloc fb\n");
7192                 return;
7193         }
7194
7195         val = I915_READ(DSPCNTR(plane));
7196
7197         if (INTEL_INFO(dev)->gen >= 4)
7198                 if (val & DISPPLANE_TILED)
7199                         plane_config->tiled = true;
7200
7201         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
7202         fourcc = intel_format_to_fourcc(pixel_format);
7203         crtc->base.primary->fb->pixel_format = fourcc;
7204         crtc->base.primary->fb->bits_per_pixel =
7205                 drm_format_plane_cpp(fourcc, 0) * 8;
7206
7207         base = I915_READ(DSPSURF(plane)) & 0xfffff000;
7208         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
7209                 offset = I915_READ(DSPOFFSET(plane));
7210         } else {
7211                 if (plane_config->tiled)
7212                         offset = I915_READ(DSPTILEOFF(plane));
7213                 else
7214                         offset = I915_READ(DSPLINOFF(plane));
7215         }
7216         plane_config->base = base;
7217
7218         val = I915_READ(PIPESRC(pipe));
7219         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
7220         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
7221
7222         val = I915_READ(DSPSTRIDE(pipe));
7223         crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
7224
7225         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
7226                                             plane_config->tiled);
7227
7228         plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
7229                                         aligned_height);
7230
7231         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7232                       pipe, plane, crtc->base.primary->fb->width,
7233                       crtc->base.primary->fb->height,
7234                       crtc->base.primary->fb->bits_per_pixel, base,
7235                       crtc->base.primary->fb->pitches[0],
7236                       plane_config->size);
7237 }
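/*
 * Worked example of the PIPESRC decoding above (illustrative register value):
 * a raw PIPESRC of 0x077f0437 gives a width of ((val >> 16) & 0xfff) + 1 =
 * 0x77f + 1 = 1920 and a height of ((val >> 0) & 0xfff) + 1 = 0x437 + 1 = 1080.
 */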
7238
7239 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7240                                      struct intel_crtc_config *pipe_config)
7241 {
7242         struct drm_device *dev = crtc->base.dev;
7243         struct drm_i915_private *dev_priv = dev->dev_private;
7244         uint32_t tmp;
7245
7246         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7247         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7248
7249         tmp = I915_READ(PIPECONF(crtc->pipe));
7250         if (!(tmp & PIPECONF_ENABLE))
7251                 return false;
7252
7253         switch (tmp & PIPECONF_BPC_MASK) {
7254         case PIPECONF_6BPC:
7255                 pipe_config->pipe_bpp = 18;
7256                 break;
7257         case PIPECONF_8BPC:
7258                 pipe_config->pipe_bpp = 24;
7259                 break;
7260         case PIPECONF_10BPC:
7261                 pipe_config->pipe_bpp = 30;
7262                 break;
7263         case PIPECONF_12BPC:
7264                 pipe_config->pipe_bpp = 36;
7265                 break;
7266         default:
7267                 break;
7268         }
7269
7270         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
7271                 pipe_config->limited_color_range = true;
7272
7273         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
7274                 struct intel_shared_dpll *pll;
7275
7276                 pipe_config->has_pch_encoder = true;
7277
7278                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
7279                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7280                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7281
7282                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7283
7284                 if (HAS_PCH_IBX(dev_priv->dev)) {
7285                         pipe_config->shared_dpll =
7286                                 (enum intel_dpll_id) crtc->pipe;
7287                 } else {
7288                         tmp = I915_READ(PCH_DPLL_SEL);
7289                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
7290                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
7291                         else
7292                                 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
7293                 }
7294
7295                 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7296
7297                 WARN_ON(!pll->get_hw_state(dev_priv, pll,
7298                                            &pipe_config->dpll_hw_state));
7299
7300                 tmp = pipe_config->dpll_hw_state.dpll;
7301                 pipe_config->pixel_multiplier =
7302                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
7303                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
7304
7305                 ironlake_pch_clock_get(crtc, pipe_config);
7306         } else {
7307                 pipe_config->pixel_multiplier = 1;
7308         }
7309
7310         intel_get_pipe_timings(crtc, pipe_config);
7311
7312         ironlake_get_pfit_config(crtc, pipe_config);
7313
7314         return true;
7315 }
7316
7317 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
7318 {
7319         struct drm_device *dev = dev_priv->dev;
7320         struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
7321         struct intel_crtc *crtc;
7322
7323         for_each_intel_crtc(dev, crtc)
7324                 WARN(crtc->active, "CRTC for pipe %c enabled\n",
7325                      pipe_name(crtc->pipe));
7326
7327         WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
7328         WARN(plls->spll_refcount, "SPLL enabled\n");
7329         WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
7330         WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
7331         WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
7332         WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
7333              "CPU PWM1 enabled\n");
7334         WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
7335              "CPU PWM2 enabled\n");
7336         WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
7337              "PCH PWM1 enabled\n");
7338         WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
7339              "Utility pin enabled\n");
7340         WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
7341
7342         /*
7343          * In theory we can still leave IRQs enabled, as long as only the HPD
7344          * interrupts remain enabled. We used to check for that, but since it's
7345          * gen-specific and since we only disable LCPLL after we fully disable
7346          * the interrupts, the check below should be enough.
7347          */
7348         WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
7349 }
7350
7351 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
7352 {
7353         struct drm_device *dev = dev_priv->dev;
7354
7355         if (IS_HASWELL(dev)) {
7356                 mutex_lock(&dev_priv->rps.hw_lock);
7357                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
7358                                             val))
7359                         DRM_ERROR("Failed to disable D_COMP\n");
7360                 mutex_unlock(&dev_priv->rps.hw_lock);
7361         } else {
7362                 I915_WRITE(D_COMP, val);
7363         }
7364         POSTING_READ(D_COMP);
7365 }
7366
7367 /*
7368  * This function implements pieces of two sequences from BSpec:
7369  * - Sequence for display software to disable LCPLL
7370  * - Sequence for display software to allow package C8+
7371  * The steps implemented here are just the steps that actually touch the LCPLL
7372  * register. Callers should take care of disabling all the display engine
7373  * functions, doing the mode unset, fixing interrupts, etc.
7374  */
7375 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7376                               bool switch_to_fclk, bool allow_power_down)
7377 {
7378         uint32_t val;
7379
7380         assert_can_disable_lcpll(dev_priv);
7381
7382         val = I915_READ(LCPLL_CTL);
7383
7384         if (switch_to_fclk) {
7385                 val |= LCPLL_CD_SOURCE_FCLK;
7386                 I915_WRITE(LCPLL_CTL, val);
7387
7388                 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
7389                                        LCPLL_CD_SOURCE_FCLK_DONE, 1))
7390                         DRM_ERROR("Switching to FCLK failed\n");
7391
7392                 val = I915_READ(LCPLL_CTL);
7393         }
7394
7395         val |= LCPLL_PLL_DISABLE;
7396         I915_WRITE(LCPLL_CTL, val);
7397         POSTING_READ(LCPLL_CTL);
7398
7399         if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
7400                 DRM_ERROR("LCPLL still locked\n");
7401
7402         val = I915_READ(D_COMP);
7403         val |= D_COMP_COMP_DISABLE;
7404         hsw_write_dcomp(dev_priv, val);
7405         ndelay(100);
7406
7407         if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
7408                 DRM_ERROR("D_COMP RCOMP still in progress\n");
7409
7410         if (allow_power_down) {
7411                 val = I915_READ(LCPLL_CTL);
7412                 val |= LCPLL_POWER_DOWN_ALLOW;
7413                 I915_WRITE(LCPLL_CTL, val);
7414                 POSTING_READ(LCPLL_CTL);
7415         }
7416 }
7417
7418 /*
7419  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
7420  * source.
7421  */
7422 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7423 {
7424         uint32_t val;
7425         unsigned long irqflags;
7426
7427         val = I915_READ(LCPLL_CTL);
7428
7429         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
7430                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
7431                 return;
7432
7433         /*
7434          * Make sure we're not in a PC8 state before disabling PC8, otherwise
7435          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
7436          *
7437          * The other problem is that hsw_restore_lcpll() is called as part of
7438          * the runtime PM resume sequence, so we can't just call
7439          * gen6_gt_force_wake_get() because that function calls
7440          * intel_runtime_pm_get(), and we can't change the runtime PM refcount
7441          * while we are on the resume sequence. So to solve this problem we have
7442          * to call special forcewake code that doesn't touch runtime PM and
7443          * doesn't enable the forcewake delayed work.
7444          */
7445         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7446         if (dev_priv->uncore.forcewake_count++ == 0)
7447                 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7448         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
7449
7450         if (val & LCPLL_POWER_DOWN_ALLOW) {
7451                 val &= ~LCPLL_POWER_DOWN_ALLOW;
7452                 I915_WRITE(LCPLL_CTL, val);
7453                 POSTING_READ(LCPLL_CTL);
7454         }
7455
7456         val = I915_READ(D_COMP);
7457         val |= D_COMP_COMP_FORCE;
7458         val &= ~D_COMP_COMP_DISABLE;
7459         hsw_write_dcomp(dev_priv, val);
7460
7461         val = I915_READ(LCPLL_CTL);
7462         val &= ~LCPLL_PLL_DISABLE;
7463         I915_WRITE(LCPLL_CTL, val);
7464
7465         if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
7466                 DRM_ERROR("LCPLL not locked yet\n");
7467
7468         if (val & LCPLL_CD_SOURCE_FCLK) {
7469                 val = I915_READ(LCPLL_CTL);
7470                 val &= ~LCPLL_CD_SOURCE_FCLK;
7471                 I915_WRITE(LCPLL_CTL, val);
7472
7473                 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
7474                                         LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
7475                         DRM_ERROR("Switching back to LCPLL failed\n");
7476         }
7477
7478         /* See the big comment above. */
7479         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
7480         if (--dev_priv->uncore.forcewake_count == 0)
7481                 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7482         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
7483 }
7484
7485 /*
7486  * Package states C8 and deeper are really deep PC states that can only be
7487  * reached when all the devices on the system allow it, so even if the graphics
7488  * device allows PC8+, it doesn't mean the system will actually get to these
7489  * states. Our driver only allows PC8+ when going into runtime PM.
7490  *
7491  * The requirements for PC8+ are that all the outputs are disabled, the power
7492  * well is disabled and most interrupts are disabled, and these are also
7493  * requirements for runtime PM. When these conditions are met, we manually
7494  * handle the remaining steps: disable the interrupts and clocks and switch
7495  * the LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
7496  * interrupt, we can hard hang the machine.
7497  *
7498  * When we really reach PC8 or deeper states (not just when we allow it) we lose
7499  * the state of some registers, so when we come back from PC8+ we need to
7500  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
7501  * need to take care of the registers kept by RC6. Notice that this happens even
7502  * if we don't put the device in PCI D3 state (which is what currently happens
7503  * because of the runtime PM support).
7504  *
7505  * For more, read "Display Sequences for Package C8" on the hardware
7506  * documentation.
7507  */
7508 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
7509 {
7510         struct drm_device *dev = dev_priv->dev;
7511         uint32_t val;
7512
7513         DRM_DEBUG_KMS("Enabling package C8+\n");
7514
7515         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7516                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
7517                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7518                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7519         }
7520
7521         lpt_disable_clkout_dp(dev);
7522         hsw_disable_lcpll(dev_priv, true, true);
7523 }
7524
7525 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7526 {
7527         struct drm_device *dev = dev_priv->dev;
7528         uint32_t val;
7529
7530         DRM_DEBUG_KMS("Disabling package C8+\n");
7531
7532         hsw_restore_lcpll(dev_priv);
7533         lpt_init_pch_refclk(dev);
7534
7535         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
7536                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
7537                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
7538                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7539         }
7540
7541         intel_prepare_ddi(dev);
7542 }
7543
7544 static void snb_modeset_global_resources(struct drm_device *dev)
7545 {
7546         modeset_update_crtc_power_domains(dev);
7547 }
7548
7549 static void haswell_modeset_global_resources(struct drm_device *dev)
7550 {
7551         modeset_update_crtc_power_domains(dev);
7552 }
7553
7554 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
7555                                  int x, int y,
7556                                  struct drm_framebuffer *fb)
7557 {
7558         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7559
7560         if (!intel_ddi_pll_select(intel_crtc))
7561                 return -EINVAL;
7562         intel_ddi_pll_enable(intel_crtc);
7563
7564         intel_crtc->lowfreq_avail = false;
7565
7566         return 0;
7567 }
7568
7569 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7570                                     struct intel_crtc_config *pipe_config)
7571 {
7572         struct drm_device *dev = crtc->base.dev;
7573         struct drm_i915_private *dev_priv = dev->dev_private;
7574         enum intel_display_power_domain pfit_domain;
7575         uint32_t tmp;
7576
7577         if (!intel_display_power_enabled(dev_priv,
7578                                          POWER_DOMAIN_PIPE(crtc->pipe)))
7579                 return false;
7580
7581         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
7582         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
7583
7584         tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
7585         if (tmp & TRANS_DDI_FUNC_ENABLE) {
7586                 enum pipe trans_edp_pipe;
7587                 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
7588                 default:
7589                         WARN(1, "unknown pipe linked to edp transcoder\n");
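                        /* fall through */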
7590                 case TRANS_DDI_EDP_INPUT_A_ONOFF:
7591                 case TRANS_DDI_EDP_INPUT_A_ON:
7592                         trans_edp_pipe = PIPE_A;
7593                         break;
7594                 case TRANS_DDI_EDP_INPUT_B_ONOFF:
7595                         trans_edp_pipe = PIPE_B;
7596                         break;
7597                 case TRANS_DDI_EDP_INPUT_C_ONOFF:
7598                         trans_edp_pipe = PIPE_C;
7599                         break;
7600                 }
7601
7602                 if (trans_edp_pipe == crtc->pipe)
7603                         pipe_config->cpu_transcoder = TRANSCODER_EDP;
7604         }
7605
7606         if (!intel_display_power_enabled(dev_priv,
7607                         POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7608                 return false;
7609
7610         tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
7611         if (!(tmp & PIPECONF_ENABLE))
7612                 return false;
7613
7614         /*
7615          * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
7616          * So just check whether this pipe is wired to DDI E and whether
7617          * the PCH transcoder is on.
7618          */
7619         tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
7620         if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
7621             I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7622                 pipe_config->has_pch_encoder = true;
7623
7624                 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7625                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
7626                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
7627
7628                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
7629         }
7630
7631         intel_get_pipe_timings(crtc, pipe_config);
7632
7633         pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7634         if (intel_display_power_enabled(dev_priv, pfit_domain))
7635                 ironlake_get_pfit_config(crtc, pipe_config);
7636
7637         if (IS_HASWELL(dev))
7638                 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7639                         (I915_READ(IPS_CTL) & IPS_ENABLE);
7640
7641         pipe_config->pixel_multiplier = 1;
7642
7643         return true;
7644 }
7645
7646 static struct {
7647         int clock;
7648         u32 config;
7649 } hdmi_audio_clock[] = {
7650         { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
7651         { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
7652         { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
7653         { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
7654         { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
7655         { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
7656         { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
7657         { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
7658         { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
7659         { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
7660 };
7661
7662 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7663 static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7664 {
7665         int i;
7666
7667         for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7668                 if (mode->clock == hdmi_audio_clock[i].clock)
7669                         break;
7670         }
7671
7672         if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7673                 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7674                 i = 1;
7675         }
7676
7677         DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7678                       hdmi_audio_clock[i].clock,
7679                       hdmi_audio_clock[i].config);
7680
7681         return hdmi_audio_clock[i].config;
7682 }
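/*
 * Example of the lookup above (illustrative): a 25175 kHz mode (e.g. VESA
 * 640x480@60) matches the DIV_ROUND_UP(25200 * 1000, 1001) entry and returns
 * AUD_CONFIG_PIXEL_CLOCK_HDMI_25175, while an unlisted clock such as 65000 kHz
 * falls off the end of the table and picks the 25200 kHz default (i = 1).
 */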
7683
7684 static bool intel_eld_uptodate(struct drm_connector *connector,
7685                                int reg_eldv, uint32_t bits_eldv,
7686                                int reg_elda, uint32_t bits_elda,
7687                                int reg_edid)
7688 {
7689         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7690         uint8_t *eld = connector->eld;
7691         uint32_t i;
7692
7693         i = I915_READ(reg_eldv);
7694         i &= bits_eldv;
7695
7696         if (!eld[0])
7697                 return !i;
7698
7699         if (!i)
7700                 return false;
7701
7702         i = I915_READ(reg_elda);
7703         i &= ~bits_elda;
7704         I915_WRITE(reg_elda, i);
7705
7706         for (i = 0; i < eld[2]; i++)
7707                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7708                         return false;
7709
7710         return true;
7711 }
7712
7713 static void g4x_write_eld(struct drm_connector *connector,
7714                           struct drm_crtc *crtc,
7715                           struct drm_display_mode *mode)
7716 {
7717         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7718         uint8_t *eld = connector->eld;
7719         uint32_t eldv;
7720         uint32_t len;
7721         uint32_t i;
7722
7723         i = I915_READ(G4X_AUD_VID_DID);
7724
7725         if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7726                 eldv = G4X_ELDV_DEVCL_DEVBLC;
7727         else
7728                 eldv = G4X_ELDV_DEVCTG;
7729
7730         if (intel_eld_uptodate(connector,
7731                                G4X_AUD_CNTL_ST, eldv,
7732                                G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7733                                G4X_HDMIW_HDMIEDID))
7734                 return;
7735
7736         i = I915_READ(G4X_AUD_CNTL_ST);
7737         i &= ~(eldv | G4X_ELD_ADDR);
7738         len = (i >> 9) & 0x1f;          /* ELD buffer size */
7739         I915_WRITE(G4X_AUD_CNTL_ST, i);
7740
7741         if (!eld[0])
7742                 return;
7743
7744         len = min_t(uint8_t, eld[2], len);
7745         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7746         for (i = 0; i < len; i++)
7747                 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
7748
7749         i = I915_READ(G4X_AUD_CNTL_ST);
7750         i |= eldv;
7751         I915_WRITE(G4X_AUD_CNTL_ST, i);
7752 }
7753
7754 static void haswell_write_eld(struct drm_connector *connector,
7755                               struct drm_crtc *crtc,
7756                               struct drm_display_mode *mode)
7757 {
7758         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7759         uint8_t *eld = connector->eld;
7760         uint32_t eldv;
7761         uint32_t i;
7762         int len;
7763         int pipe = to_intel_crtc(crtc)->pipe;
7764         int tmp;
7765
7766         int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
7767         int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
7768         int aud_config = HSW_AUD_CFG(pipe);
7769         int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
7770
7771         /* Audio output enable */
7772         DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
7773         tmp = I915_READ(aud_cntrl_st2);
7774         tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
7775         I915_WRITE(aud_cntrl_st2, tmp);
7776         POSTING_READ(aud_cntrl_st2);
7777
7778         assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
7779
7780         /* Set ELD valid state */
7781         tmp = I915_READ(aud_cntrl_st2);
7782         DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
7783         tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
7784         I915_WRITE(aud_cntrl_st2, tmp);
7785         tmp = I915_READ(aud_cntrl_st2);
7786         DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
7787
7788         /* Enable HDMI mode */
7789         tmp = I915_READ(aud_config);
7790         DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
7791         /* clear N_programming_enable and N_value_index */
7792         tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
7793         I915_WRITE(aud_config, tmp);
7794
7795         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7796
7797         eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7798
7799         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7800                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7801                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7802                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7803         } else {
7804                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7805         }
7806
7807         if (intel_eld_uptodate(connector,
7808                                aud_cntrl_st2, eldv,
7809                                aud_cntl_st, IBX_ELD_ADDRESS,
7810                                hdmiw_hdmiedid))
7811                 return;
7812
7813         i = I915_READ(aud_cntrl_st2);
7814         i &= ~eldv;
7815         I915_WRITE(aud_cntrl_st2, i);
7816
7817         if (!eld[0])
7818                 return;
7819
7820         i = I915_READ(aud_cntl_st);
7821         i &= ~IBX_ELD_ADDRESS;
7822         I915_WRITE(aud_cntl_st, i);
7823         i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
7824         DRM_DEBUG_DRIVER("port num:%d\n", i);
7825
7826         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7827         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7828         for (i = 0; i < len; i++)
7829                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7830
7831         i = I915_READ(aud_cntrl_st2);
7832         i |= eldv;
7833         I915_WRITE(aud_cntrl_st2, i);
7834
7835 }
7836
7837 static void ironlake_write_eld(struct drm_connector *connector,
7838                                struct drm_crtc *crtc,
7839                                struct drm_display_mode *mode)
7840 {
7841         struct drm_i915_private *dev_priv = connector->dev->dev_private;
7842         uint8_t *eld = connector->eld;
7843         uint32_t eldv;
7844         uint32_t i;
7845         int len;
7846         int hdmiw_hdmiedid;
7847         int aud_config;
7848         int aud_cntl_st;
7849         int aud_cntrl_st2;
7850         int pipe = to_intel_crtc(crtc)->pipe;
7851
7852         if (HAS_PCH_IBX(connector->dev)) {
7853                 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
7854                 aud_config = IBX_AUD_CFG(pipe);
7855                 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
7856                 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
7857         } else if (IS_VALLEYVIEW(connector->dev)) {
7858                 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
7859                 aud_config = VLV_AUD_CFG(pipe);
7860                 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
7861                 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
7862         } else {
7863                 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
7864                 aud_config = CPT_AUD_CFG(pipe);
7865                 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
7866                 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
7867         }
7868
7869         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
7870
7871         if (IS_VALLEYVIEW(connector->dev))  {
7872                 struct intel_encoder *intel_encoder;
7873                 struct intel_digital_port *intel_dig_port;
7874
7875                 intel_encoder = intel_attached_encoder(connector);
7876                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7877                 i = intel_dig_port->port;
7878         } else {
7879                 i = I915_READ(aud_cntl_st);
7880                 i = (i >> 29) & DIP_PORT_SEL_MASK;
7881                 /* DIP_Port_Select, 0x1 = PortB */
7882         }
7883
7884         if (!i) {
7885                 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
7886                 /* operate blindly on all ports */
7887                 eldv = IBX_ELD_VALIDB;
7888                 eldv |= IBX_ELD_VALIDB << 4;
7889                 eldv |= IBX_ELD_VALIDB << 8;
7890         } else {
7891                 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
7892                 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
7893         }
7894
7895         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
7896                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
7897                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
7898                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
7899         } else {
7900                 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
7901         }
7902
7903         if (intel_eld_uptodate(connector,
7904                                aud_cntrl_st2, eldv,
7905                                aud_cntl_st, IBX_ELD_ADDRESS,
7906                                hdmiw_hdmiedid))
7907                 return;
7908
7909         i = I915_READ(aud_cntrl_st2);
7910         i &= ~eldv;
7911         I915_WRITE(aud_cntrl_st2, i);
7912
7913         if (!eld[0])
7914                 return;
7915
7916         i = I915_READ(aud_cntl_st);
7917         i &= ~IBX_ELD_ADDRESS;
7918         I915_WRITE(aud_cntl_st, i);
7919
7920         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
7921         DRM_DEBUG_DRIVER("ELD size %d\n", len);
7922         for (i = 0; i < len; i++)
7923                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
7924
7925         i = I915_READ(aud_cntrl_st2);
7926         i |= eldv;
7927         I915_WRITE(aud_cntrl_st2, i);
7928 }
7929
7930 void intel_write_eld(struct drm_encoder *encoder,
7931                      struct drm_display_mode *mode)
7932 {
7933         struct drm_crtc *crtc = encoder->crtc;
7934         struct drm_connector *connector;
7935         struct drm_device *dev = encoder->dev;
7936         struct drm_i915_private *dev_priv = dev->dev_private;
7937
7938         connector = drm_select_eld(encoder, mode);
7939         if (!connector)
7940                 return;
7941
7942         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7943                          connector->base.id,
7944                          connector->name,
7945                          connector->encoder->base.id,
7946                          connector->encoder->name);
7947
7948         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
7949
7950         if (dev_priv->display.write_eld)
7951                 dev_priv->display.write_eld(connector, crtc, mode);
7952 }
7953
7954 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
7955 {
7956         struct drm_device *dev = crtc->dev;
7957         struct drm_i915_private *dev_priv = dev->dev_private;
7958         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7959         uint32_t cntl;
7960
7961         if (base != intel_crtc->cursor_base) {
7962                 /* On these chipsets we can only modify the base whilst
7963                  * the cursor is disabled.
7964                  */
7965                 if (intel_crtc->cursor_cntl) {
7966                         I915_WRITE(_CURACNTR, 0);
7967                         POSTING_READ(_CURACNTR);
7968                         intel_crtc->cursor_cntl = 0;
7969                 }
7970
7971                 I915_WRITE(_CURABASE, base);
7972                 POSTING_READ(_CURABASE);
7973         }
7974
7975         /* XXX width must be 64, stride 256 => 0x00 << 28 */
7976         cntl = 0;
7977         if (base)
7978                 cntl = (CURSOR_ENABLE |
7979                         CURSOR_GAMMA_ENABLE |
7980                         CURSOR_FORMAT_ARGB);
7981         if (intel_crtc->cursor_cntl != cntl) {
7982                 I915_WRITE(_CURACNTR, cntl);
7983                 POSTING_READ(_CURACNTR);
7984                 intel_crtc->cursor_cntl = cntl;
7985         }
7986 }
7987
7988 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
7989 {
7990         struct drm_device *dev = crtc->dev;
7991         struct drm_i915_private *dev_priv = dev->dev_private;
7992         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7993         int pipe = intel_crtc->pipe;
7994         uint32_t cntl;
7995
7996         cntl = 0;
7997         if (base) {
7998                 cntl = MCURSOR_GAMMA_ENABLE;
7999                 switch (intel_crtc->cursor_width) {
8000                         case 64:
8001                                 cntl |= CURSOR_MODE_64_ARGB_AX;
8002                                 break;
8003                         case 128:
8004                                 cntl |= CURSOR_MODE_128_ARGB_AX;
8005                                 break;
8006                         case 256:
8007                                 cntl |= CURSOR_MODE_256_ARGB_AX;
8008                                 break;
8009                         default:
8010                                 WARN_ON(1);
8011                                 return;
8012                 }
8013                 cntl |= pipe << 28; /* Connect to correct pipe */
8014         }
8015         if (intel_crtc->cursor_cntl != cntl) {
8016                 I915_WRITE(CURCNTR(pipe), cntl);
8017                 POSTING_READ(CURCNTR(pipe));
8018                 intel_crtc->cursor_cntl = cntl;
8019         }
8020
8021         /* and commit changes on next vblank */
8022         I915_WRITE(CURBASE(pipe), base);
8023         POSTING_READ(CURBASE(pipe));
8024 }
8025
8026 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
8027 {
8028         struct drm_device *dev = crtc->dev;
8029         struct drm_i915_private *dev_priv = dev->dev_private;
8030         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8031         int pipe = intel_crtc->pipe;
8032         uint32_t cntl;
8033
8034         cntl = 0;
8035         if (base) {
8036                 cntl = MCURSOR_GAMMA_ENABLE;
8037                 switch (intel_crtc->cursor_width) {
8038                         case 64:
8039                                 cntl |= CURSOR_MODE_64_ARGB_AX;
8040                                 break;
8041                         case 128:
8042                                 cntl |= CURSOR_MODE_128_ARGB_AX;
8043                                 break;
8044                         case 256:
8045                                 cntl |= CURSOR_MODE_256_ARGB_AX;
8046                                 break;
8047                         default:
8048                                 WARN_ON(1);
8049                                 return;
8050                 }
8051         }
8052         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8053                 cntl |= CURSOR_PIPE_CSC_ENABLE;
8054
8055         if (intel_crtc->cursor_cntl != cntl) {
8056                 I915_WRITE(CURCNTR(pipe), cntl);
8057                 POSTING_READ(CURCNTR(pipe));
8058                 intel_crtc->cursor_cntl = cntl;
8059         }
8060
8061         /* and commit changes on next vblank */
8062         I915_WRITE(CURBASE(pipe), base);
8063         POSTING_READ(CURBASE(pipe));
8064 }
8065
8066 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
8067 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8068                                      bool on)
8069 {
8070         struct drm_device *dev = crtc->dev;
8071         struct drm_i915_private *dev_priv = dev->dev_private;
8072         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8073         int pipe = intel_crtc->pipe;
8074         int x = crtc->cursor_x;
8075         int y = crtc->cursor_y;
8076         u32 base = 0, pos = 0;
8077
8078         if (on)
8079                 base = intel_crtc->cursor_addr;
8080
8081         if (x >= intel_crtc->config.pipe_src_w)
8082                 base = 0;
8083
8084         if (y >= intel_crtc->config.pipe_src_h)
8085                 base = 0;
8086
8087         if (x < 0) {
8088                 if (x + intel_crtc->cursor_width <= 0)
8089                         base = 0;
8090
8091                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
8092                 x = -x;
8093         }
8094         pos |= x << CURSOR_X_SHIFT;
8095
8096         if (y < 0) {
8097                 if (y + intel_crtc->cursor_height <= 0)
8098                         base = 0;
8099
8100                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
8101                 y = -y;
8102         }
8103         pos |= y << CURSOR_Y_SHIFT;
8104
8105         if (base == 0 && intel_crtc->cursor_base == 0)
8106                 return;
8107
8108         I915_WRITE(CURPOS(pipe), pos);
8109
8110         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
8111                 ivb_update_cursor(crtc, base);
8112         else if (IS_845G(dev) || IS_I865G(dev))
8113                 i845_update_cursor(crtc, base);
8114         else
8115                 i9xx_update_cursor(crtc, base);
8116         intel_crtc->cursor_base = base;
8117 }
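/*
 * Illustrative example of the CURPOS encoding above: a cursor at (-10, 20)
 * with a 64-pixel-wide image is written as magnitude 10 with CURSOR_POS_SIGN
 * set in the X field and a plain 20 in the Y field; the base address is only
 * zeroed once the cursor has scrolled entirely off the visible area.
 */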
8118
8119 /*
8120  * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8121  *
8122  * Note that the object's reference will be consumed if the update fails.  If
8123  * the update succeeds, the reference of the old object (if any) will be
8124  * consumed.
8125  */
8126 static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8127                                      struct drm_i915_gem_object *obj,
8128                                      uint32_t width, uint32_t height)
8129 {
8130         struct drm_device *dev = crtc->dev;
8131         struct drm_i915_private *dev_priv = dev->dev_private;
8132         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8133         enum pipe pipe = intel_crtc->pipe;
8134         unsigned old_width;
8135         uint32_t addr;
8136         int ret;
8137
8138         /* if we want to turn off the cursor ignore width and height */
8139         if (!obj) {
8140                 DRM_DEBUG_KMS("cursor off\n");
8141                 addr = 0;
8142                 obj = NULL;
8143                 mutex_lock(&dev->struct_mutex);
8144                 goto finish;
8145         }
8146
8147         /* Check for which cursor types we support */
8148         if (!((width == 64 && height == 64) ||
8149                         (width == 128 && height == 128 && !IS_GEN2(dev)) ||
8150                         (width == 256 && height == 256 && !IS_GEN2(dev)))) {
8151                 DRM_DEBUG("Cursor dimension not supported\n");
8152                 return -EINVAL;
8153         }
8154
8155         if (obj->base.size < width * height * 4) {
8156                 DRM_DEBUG_KMS("buffer is too small\n");
8157                 ret = -ENOMEM;
8158                 goto fail;
8159         }
8160
8161         /* we only need to pin inside GTT if cursor is non-phy */
8162         mutex_lock(&dev->struct_mutex);
8163         if (!INTEL_INFO(dev)->cursor_needs_physical) {
8164                 unsigned alignment;
8165
8166                 if (obj->tiling_mode) {
8167                         DRM_DEBUG_KMS("cursor cannot be tiled\n");
8168                         ret = -EINVAL;
8169                         goto fail_locked;
8170                 }
8171
8172                 /* Note that the w/a also requires 2 PTE of padding following
8173                  * the bo. We currently fill all unused PTE with the shadow
8174                  * page and so we should always have valid PTE following the
8175                  * cursor preventing the VT-d warning.
8176                  */
8177                 alignment = 0;
8178                 if (need_vtd_wa(dev))
8179                         alignment = 64*1024;
8180
8181                 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
8182                 if (ret) {
8183                         DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
8184                         goto fail_locked;
8185                 }
8186
8187                 ret = i915_gem_object_put_fence(obj);
8188                 if (ret) {
8189                         DRM_DEBUG_KMS("failed to release fence for cursor");
8190                         goto fail_unpin;
8191                 }
8192
8193                 addr = i915_gem_obj_ggtt_offset(obj);
8194         } else {
8195                 int align = IS_I830(dev) ? 16 * 1024 : 256;
8196                 ret = i915_gem_object_attach_phys(obj, align);
8197                 if (ret) {
8198                         DRM_DEBUG_KMS("failed to attach phys object\n");
8199                         goto fail_locked;
8200                 }
8201                 addr = obj->phys_handle->busaddr;
8202         }
8203
8204         if (IS_GEN2(dev))
8205                 I915_WRITE(CURSIZE, (height << 12) | width);
8206
8207  finish:
8208         if (intel_crtc->cursor_bo) {
8209                 if (!INTEL_INFO(dev)->cursor_needs_physical)
8210                         i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
8211         }
8212
8213         i915_gem_track_fb(intel_crtc->cursor_bo, obj,
8214                           INTEL_FRONTBUFFER_CURSOR(pipe));
8215         mutex_unlock(&dev->struct_mutex);
8216
8217         old_width = intel_crtc->cursor_width;
8218
8219         intel_crtc->cursor_addr = addr;
8220         intel_crtc->cursor_bo = obj;
8221         intel_crtc->cursor_width = width;
8222         intel_crtc->cursor_height = height;
8223
8224         if (intel_crtc->active) {
8225                 if (old_width != width)
8226                         intel_update_watermarks(crtc);
8227                 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8228         }
8229
8230         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
8231
8232         return 0;
8233 fail_unpin:
8234         i915_gem_object_unpin_from_display_plane(obj);
8235 fail_locked:
8236         mutex_unlock(&dev->struct_mutex);
8237 fail:
8238         drm_gem_object_unreference_unlocked(&obj->base);
8239         return ret;
8240 }
8241
8242 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
8243                                  u16 *blue, uint32_t start, uint32_t size)
8244 {
8245         int end = (start + size > 256) ? 256 : start + size, i;
8246         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8247
8248         for (i = start; i < end; i++) {
8249                 intel_crtc->lut_r[i] = red[i] >> 8;
8250                 intel_crtc->lut_g[i] = green[i] >> 8;
8251                 intel_crtc->lut_b[i] = blue[i] >> 8;
8252         }
8253
8254         intel_crtc_load_lut(crtc);
8255 }
8256
8257 /* VESA 640x480x72Hz mode to set on the pipe */
8258 static struct drm_display_mode load_detect_mode = {
8259         DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
8260                  704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
8261 };
8262
8263 struct drm_framebuffer *
8264 __intel_framebuffer_create(struct drm_device *dev,
8265                            struct drm_mode_fb_cmd2 *mode_cmd,
8266                            struct drm_i915_gem_object *obj)
8267 {
8268         struct intel_framebuffer *intel_fb;
8269         int ret;
8270
8271         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8272         if (!intel_fb) {
8273                 drm_gem_object_unreference_unlocked(&obj->base);
8274                 return ERR_PTR(-ENOMEM);
8275         }
8276
8277         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
8278         if (ret)
8279                 goto err;
8280
8281         return &intel_fb->base;
8282 err:
8283         drm_gem_object_unreference_unlocked(&obj->base);
8284         kfree(intel_fb);
8285
8286         return ERR_PTR(ret);
8287 }
8288
8289 static struct drm_framebuffer *
8290 intel_framebuffer_create(struct drm_device *dev,
8291                          struct drm_mode_fb_cmd2 *mode_cmd,
8292                          struct drm_i915_gem_object *obj)
8293 {
8294         struct drm_framebuffer *fb;
8295         int ret;
8296
8297         ret = i915_mutex_lock_interruptible(dev);
8298         if (ret)
8299                 return ERR_PTR(ret);
8300         fb = __intel_framebuffer_create(dev, mode_cmd, obj);
8301         mutex_unlock(&dev->struct_mutex);
8302
8303         return fb;
8304 }
8305
8306 static u32
8307 intel_framebuffer_pitch_for_width(int width, int bpp)
8308 {
8309         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
8310         return ALIGN(pitch, 64);
8311 }
8312
8313 static u32
8314 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
8315 {
8316         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
8317         return PAGE_ALIGN(pitch * mode->vdisplay);
8318 }
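/*
 * Worked example for the two helpers above (illustrative, assuming 4 KiB
 * pages): at 32 bpp a 1366-pixel-wide mode needs DIV_ROUND_UP(1366 * 32, 8) =
 * 5464 bytes per line, which ALIGN() rounds up to a 5504-byte pitch; for
 * load_detect_mode (640x480 at 32 bpp) the pitch is 2560 and the allocation
 * size is PAGE_ALIGN(2560 * 480) = 1228800 bytes.
 */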
8319
8320 static struct drm_framebuffer *
8321 intel_framebuffer_create_for_mode(struct drm_device *dev,
8322                                   struct drm_display_mode *mode,
8323                                   int depth, int bpp)
8324 {
8325         struct drm_i915_gem_object *obj;
8326         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
8327
8328         obj = i915_gem_alloc_object(dev,
8329                                     intel_framebuffer_size_for_mode(mode, bpp));
8330         if (obj == NULL)
8331                 return ERR_PTR(-ENOMEM);
8332
8333         mode_cmd.width = mode->hdisplay;
8334         mode_cmd.height = mode->vdisplay;
8335         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
8336                                                                 bpp);
8337         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
8338
8339         return intel_framebuffer_create(dev, &mode_cmd, obj);
8340 }
8341
8342 static struct drm_framebuffer *
8343 mode_fits_in_fbdev(struct drm_device *dev,
8344                    struct drm_display_mode *mode)
8345 {
8346 #ifdef CONFIG_DRM_I915_FBDEV
8347         struct drm_i915_private *dev_priv = dev->dev_private;
8348         struct drm_i915_gem_object *obj;
8349         struct drm_framebuffer *fb;
8350
8351         if (!dev_priv->fbdev)
8352                 return NULL;
8353
8354         if (!dev_priv->fbdev->fb)
8355                 return NULL;
8356
8357         obj = dev_priv->fbdev->fb->obj;
8358         BUG_ON(!obj);
8359
8360         fb = &dev_priv->fbdev->fb->base;
8361         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
8362                                                                fb->bits_per_pixel))
8363                 return NULL;
8364
8365         if (obj->base.size < mode->vdisplay * fb->pitches[0])
8366                 return NULL;
8367
8368         return fb;
8369 #else
8370         return NULL;
8371 #endif
8372 }
8373
8374 bool intel_get_load_detect_pipe(struct drm_connector *connector,
8375                                 struct drm_display_mode *mode,
8376                                 struct intel_load_detect_pipe *old,
8377                                 struct drm_modeset_acquire_ctx *ctx)
8378 {
8379         struct intel_crtc *intel_crtc;
8380         struct intel_encoder *intel_encoder =
8381                 intel_attached_encoder(connector);
8382         struct drm_crtc *possible_crtc;
8383         struct drm_encoder *encoder = &intel_encoder->base;
8384         struct drm_crtc *crtc = NULL;
8385         struct drm_device *dev = encoder->dev;
8386         struct drm_framebuffer *fb;
8387         struct drm_mode_config *config = &dev->mode_config;
8388         int ret, i = -1;
8389
8390         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8391                       connector->base.id, connector->name,
8392                       encoder->base.id, encoder->name);
8393
8394         drm_modeset_acquire_init(ctx, 0);
8395
8396 retry:
8397         ret = drm_modeset_lock(&config->connection_mutex, ctx);
8398         if (ret)
8399                 goto fail_unlock;
8400
8401         /*
8402          * Algorithm gets a little messy:
8403          *
8404          *   - if the connector already has an assigned crtc, use it (but make
8405          *     sure it's on first)
8406          *
8407          *   - try to find the first unused crtc that can drive this connector,
8408          *     and use that if we find one
8409          */
8410
8411         /* See if we already have a CRTC for this connector */
8412         if (encoder->crtc) {
8413                 crtc = encoder->crtc;
8414
8415                 ret = drm_modeset_lock(&crtc->mutex, ctx);
8416                 if (ret)
8417                         goto fail_unlock;
8418
8419                 old->dpms_mode = connector->dpms;
8420                 old->load_detect_temp = false;
8421
8422                 /* Make sure the crtc and connector are running */
8423                 if (connector->dpms != DRM_MODE_DPMS_ON)
8424                         connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8425
8426                 return true;
8427         }
8428
8429         /* Find an unused one (if possible) */
8430         for_each_crtc(dev, possible_crtc) {
8431                 i++;
8432                 if (!(encoder->possible_crtcs & (1 << i)))
8433                         continue;
8434                 if (!possible_crtc->enabled) {
8435                         crtc = possible_crtc;
8436                         break;
8437                 }
8438         }
8439
8440         /*
8441          * If we didn't find an unused CRTC, don't use any.
8442          */
8443         if (!crtc) {
8444                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
8445                 goto fail_unlock;
8446         }
8447
8448         ret = drm_modeset_lock(&crtc->mutex, ctx);
8449         if (ret)
8450                 goto fail_unlock;
8451         intel_encoder->new_crtc = to_intel_crtc(crtc);
8452         to_intel_connector(connector)->new_encoder = intel_encoder;
8453
8454         intel_crtc = to_intel_crtc(crtc);
8455         intel_crtc->new_enabled = true;
8456         intel_crtc->new_config = &intel_crtc->config;
8457         old->dpms_mode = connector->dpms;
8458         old->load_detect_temp = true;
8459         old->release_fb = NULL;
8460
8461         if (!mode)
8462                 mode = &load_detect_mode;
8463
8464         /* We need a framebuffer large enough to accommodate all accesses
8465          * that the plane may generate whilst we perform load detection.
8466          * We cannot rely on the fbcon either being present (we get called
8467          * during its initialisation to detect all boot displays, or it may
8468          * not even exist) or being large enough to satisfy the requested
8469          * mode.
8470          */
8471         fb = mode_fits_in_fbdev(dev, mode);
8472         if (fb == NULL) {
8473                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
8474                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
8475                 old->release_fb = fb;
8476         } else
8477                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
8478         if (IS_ERR(fb)) {
8479                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
8480                 goto fail;
8481         }
8482
8483         if (intel_set_mode(crtc, mode, 0, 0, fb)) {
8484                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
8485                 if (old->release_fb)
8486                         old->release_fb->funcs->destroy(old->release_fb);
8487                 goto fail;
8488         }
8489
8490         /* let the connector get through one full cycle before testing */
8491         intel_wait_for_vblank(dev, intel_crtc->pipe);
8492         return true;
8493
8494  fail:
8495         intel_crtc->new_enabled = crtc->enabled;
8496         if (intel_crtc->new_enabled)
8497                 intel_crtc->new_config = &intel_crtc->config;
8498         else
8499                 intel_crtc->new_config = NULL;
8500 fail_unlock:
8501         if (ret == -EDEADLK) {
8502                 drm_modeset_backoff(ctx);
8503                 goto retry;
8504         }
8505
8506         drm_modeset_drop_locks(ctx);
8507         drm_modeset_acquire_fini(ctx);
8508
8509         return false;
8510 }
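
/*
 * Illustrative sketch (not from this file): the typical pairing of the two
 * load-detect helpers, roughly as an analog connector's detect callback
 * would use them. The probing step in the middle is a placeholder for
 * whatever the caller measures while the temporary pipe is lit up, and the
 * function name is made up for this example.
 */
static enum drm_connector_status __maybe_unused
example_load_detect(struct drm_connector *connector)
{
        struct drm_modeset_acquire_ctx ctx;
        struct intel_load_detect_pipe tmp;
        enum drm_connector_status status = connector_status_unknown;

        if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
                /* ... probe the sink here (DDC, DAC load, CRC, ...) and
                 * set status based on the result ... */
                intel_release_load_detect_pipe(connector, &tmp, &ctx);
        }

        return status;
}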
8511
8512 void intel_release_load_detect_pipe(struct drm_connector *connector,
8513                                     struct intel_load_detect_pipe *old,
8514                                     struct drm_modeset_acquire_ctx *ctx)
8515 {
8516         struct intel_encoder *intel_encoder =
8517                 intel_attached_encoder(connector);
8518         struct drm_encoder *encoder = &intel_encoder->base;
8519         struct drm_crtc *crtc = encoder->crtc;
8520         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8521
8522         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8523                       connector->base.id, connector->name,
8524                       encoder->base.id, encoder->name);
8525
8526         if (old->load_detect_temp) {
8527                 to_intel_connector(connector)->new_encoder = NULL;
8528                 intel_encoder->new_crtc = NULL;
8529                 intel_crtc->new_enabled = false;
8530                 intel_crtc->new_config = NULL;
8531                 intel_set_mode(crtc, NULL, 0, 0, NULL);
8532
8533                 if (old->release_fb) {
8534                         drm_framebuffer_unregister_private(old->release_fb);
8535                         drm_framebuffer_unreference(old->release_fb);
8536                 }
8537
8538                 goto unlock;
8540         }
8541
8542         /* Switch crtc and encoder back off if necessary */
8543         if (old->dpms_mode != DRM_MODE_DPMS_ON)
8544                 connector->funcs->dpms(connector, old->dpms_mode);
8545
8546 unlock:
8547         drm_modeset_drop_locks(ctx);
8548         drm_modeset_acquire_fini(ctx);
8549 }
8550
8551 static int i9xx_pll_refclk(struct drm_device *dev,
8552                            const struct intel_crtc_config *pipe_config)
8553 {
8554         struct drm_i915_private *dev_priv = dev->dev_private;
8555         u32 dpll = pipe_config->dpll_hw_state.dpll;
8556
8557         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
8558                 return dev_priv->vbt.lvds_ssc_freq;
8559         else if (HAS_PCH_SPLIT(dev))
8560                 return 120000;
8561         else if (!IS_GEN2(dev))
8562                 return 96000;
8563         else
8564                 return 48000;
8565 }
8566
8567 /* Returns the clock of the currently programmed mode of the given pipe. */
8568 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
8569                                 struct intel_crtc_config *pipe_config)
8570 {
8571         struct drm_device *dev = crtc->base.dev;
8572         struct drm_i915_private *dev_priv = dev->dev_private;
8573         int pipe = pipe_config->cpu_transcoder;
8574         u32 dpll = pipe_config->dpll_hw_state.dpll;
8575         u32 fp;
8576         intel_clock_t clock;
8577         int refclk = i9xx_pll_refclk(dev, pipe_config);
8578
8579         if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
8580                 fp = pipe_config->dpll_hw_state.fp0;
8581         else
8582                 fp = pipe_config->dpll_hw_state.fp1;
8583
8584         clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
8585         if (IS_PINEVIEW(dev)) {
8586                 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
8587                 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
8588         } else {
8589                 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
8590                 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
8591         }
8592
8593         if (!IS_GEN2(dev)) {
8594                 if (IS_PINEVIEW(dev))
8595                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
8596                                 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
8597                 else
8598                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
8599                                DPLL_FPA01_P1_POST_DIV_SHIFT);
8600
8601                 switch (dpll & DPLL_MODE_MASK) {
8602                 case DPLLB_MODE_DAC_SERIAL:
8603                         clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
8604                                 5 : 10;
8605                         break;
8606                 case DPLLB_MODE_LVDS:
8607                         clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
8608                                 7 : 14;
8609                         break;
8610                 default:
8611                         DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
8612                                   "mode\n", (int)(dpll & DPLL_MODE_MASK));
8613                         return;
8614                 }
8615
8616                 if (IS_PINEVIEW(dev))
8617                         pineview_clock(refclk, &clock);
8618                 else
8619                         i9xx_clock(refclk, &clock);
8620         } else {
8621                 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
8622                 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
8623
8624                 if (is_lvds) {
8625                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
8626                                        DPLL_FPA01_P1_POST_DIV_SHIFT);
8627
8628                         if (lvds & LVDS_CLKB_POWER_UP)
8629                                 clock.p2 = 7;
8630                         else
8631                                 clock.p2 = 14;
8632                 } else {
8633                         if (dpll & PLL_P1_DIVIDE_BY_TWO)
8634                                 clock.p1 = 2;
8635                         else {
8636                                 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
8637                                             DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
8638                         }
8639                         if (dpll & PLL_P2_DIVIDE_BY_4)
8640                                 clock.p2 = 4;
8641                         else
8642                                 clock.p2 = 2;
8643                 }
8644
8645                 i9xx_clock(refclk, &clock);
8646         }
8647
8648         /*
8649          * This value includes pixel_multiplier. We will use
8650          * port_clock to compute adjusted_mode.crtc_clock in the
8651          * encoder's get_config() function.
8652          */
8653         pipe_config->port_clock = clock.dot;
8654 }
8655
8656 int intel_dotclock_calculate(int link_freq,
8657                              const struct intel_link_m_n *m_n)
8658 {
8659         /*
8660          * The calculation for the data clock is:
8661          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8662          * But we want to avoid losing precision if possible, so:
8663          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8664          *
8665          * and using the link M/N values the dotclock is simply:
8666          * dot_clock = (link_m * link_freq) / link_n
8667          */
8668
8669         if (!m_n->link_n)
8670                 return 0;
8671
8672         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8673 }
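
/*
 * Worked example (hypothetical numbers): for a DP HBR link, link_freq is
 * 270000 (kHz) and, with link_m = 288358 and link_n = 0x80000 = 524288, the
 * helper returns 288358 * 270000 / 524288, roughly 148500 kHz, i.e. the
 * 148.5 MHz dotclock of a typical 1080p mode.
 */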
8674
8675 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
8676                                    struct intel_crtc_config *pipe_config)
8677 {
8678         struct drm_device *dev = crtc->base.dev;
8679
8680         /* read out port_clock from the DPLL */
8681         i9xx_crtc_clock_get(crtc, pipe_config);
8682
8683         /*
8684          * This value does not include pixel_multiplier.
8685          * We will check that port_clock and adjusted_mode.crtc_clock
8686          * agree once we know their relationship in the encoder's
8687          * get_config() function.
8688          */
8689         pipe_config->adjusted_mode.crtc_clock =
8690                 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
8691                                          &pipe_config->fdi_m_n);
8692 }
8693
8694 /** Returns the currently programmed mode of the given pipe. */
8695 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8696                                              struct drm_crtc *crtc)
8697 {
8698         struct drm_i915_private *dev_priv = dev->dev_private;
8699         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8700         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8701         struct drm_display_mode *mode;
8702         struct intel_crtc_config pipe_config;
8703         int htot = I915_READ(HTOTAL(cpu_transcoder));
8704         int hsync = I915_READ(HSYNC(cpu_transcoder));
8705         int vtot = I915_READ(VTOTAL(cpu_transcoder));
8706         int vsync = I915_READ(VSYNC(cpu_transcoder));
8707         enum pipe pipe = intel_crtc->pipe;
8708
8709         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
8710         if (!mode)
8711                 return NULL;
8712
8713         /*
8714          * Construct a pipe_config sufficient for getting the clock info
8715          * back out of crtc_clock_get.
8716          *
8717          * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
8718          * to use a real value here instead.
8719          */
8720         pipe_config.cpu_transcoder = (enum transcoder) pipe;
8721         pipe_config.pixel_multiplier = 1;
8722         pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
8723         pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
8724         pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
8725         i9xx_crtc_clock_get(intel_crtc, &pipe_config);
8726
8727         mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
8728         mode->hdisplay = (htot & 0xffff) + 1;
8729         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
8730         mode->hsync_start = (hsync & 0xffff) + 1;
8731         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
8732         mode->vdisplay = (vtot & 0xffff) + 1;
8733         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
8734         mode->vsync_start = (vsync & 0xffff) + 1;
8735         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
8736
8737         drm_mode_set_name(mode);
8738
8739         return mode;
8740 }
8741
8742 static void intel_increase_pllclock(struct drm_device *dev,
8743                                     enum pipe pipe)
8744 {
8745         struct drm_i915_private *dev_priv = dev->dev_private;
8746         int dpll_reg = DPLL(pipe);
8747         int dpll;
8748
8749         if (HAS_PCH_SPLIT(dev))
8750                 return;
8751
8752         if (!dev_priv->lvds_downclock_avail)
8753                 return;
8754
8755         dpll = I915_READ(dpll_reg);
8756         if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
8757                 DRM_DEBUG_DRIVER("upclocking LVDS\n");
8758
8759                 assert_panel_unlocked(dev_priv, pipe);
8760
8761                 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
8762                 I915_WRITE(dpll_reg, dpll);
8763                 intel_wait_for_vblank(dev, pipe);
8764
8765                 dpll = I915_READ(dpll_reg);
8766                 if (dpll & DISPLAY_RATE_SELECT_FPA1)
8767                         DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
8768         }
8769 }
8770
8771 static void intel_decrease_pllclock(struct drm_crtc *crtc)
8772 {
8773         struct drm_device *dev = crtc->dev;
8774         struct drm_i915_private *dev_priv = dev->dev_private;
8775         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8776
8777         if (HAS_PCH_SPLIT(dev))
8778                 return;
8779
8780         if (!dev_priv->lvds_downclock_avail)
8781                 return;
8782
8783         /*
8784          * Since this is called by a timer, we should never get here in
8785          * the manual case.
8786          */
8787         if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
8788                 int pipe = intel_crtc->pipe;
8789                 int dpll_reg = DPLL(pipe);
8790                 int dpll;
8791
8792                 DRM_DEBUG_DRIVER("downclocking LVDS\n");
8793
8794                 assert_panel_unlocked(dev_priv, pipe);
8795
8796                 dpll = I915_READ(dpll_reg);
8797                 dpll |= DISPLAY_RATE_SELECT_FPA1;
8798                 I915_WRITE(dpll_reg, dpll);
8799                 intel_wait_for_vblank(dev, pipe);
8800                 dpll = I915_READ(dpll_reg);
8801                 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
8802                         DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
8803         }
8804
8805 }
8806
8807 void intel_mark_busy(struct drm_device *dev)
8808 {
8809         struct drm_i915_private *dev_priv = dev->dev_private;
8810
8811         if (dev_priv->mm.busy)
8812                 return;
8813
8814         intel_runtime_pm_get(dev_priv);
8815         i915_update_gfx_val(dev_priv);
8816         dev_priv->mm.busy = true;
8817 }
8818
8819 void intel_mark_idle(struct drm_device *dev)
8820 {
8821         struct drm_i915_private *dev_priv = dev->dev_private;
8822         struct drm_crtc *crtc;
8823
8824         if (!dev_priv->mm.busy)
8825                 return;
8826
8827         dev_priv->mm.busy = false;
8828
8829         if (!i915.powersave)
8830                 goto out;
8831
8832         for_each_crtc(dev, crtc) {
8833                 if (!crtc->primary->fb)
8834                         continue;
8835
8836                 intel_decrease_pllclock(crtc);
8837         }
8838
8839         if (INTEL_INFO(dev)->gen >= 6)
8840                 gen6_rps_idle(dev->dev_private);
8841
8842 out:
8843         intel_runtime_pm_put(dev_priv);
8844 }
8845
8846
8847 /**
8848  * intel_mark_fb_busy - mark given planes as busy
8849  * @dev: DRM device
8850  * @frontbuffer_bits: bits for the affected planes
8851  * @ring: optional ring for asynchronous commands
8852  *
8853  * This function gets called every time the screen contents change. It can be
8854  * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
8855  */
8856 static void intel_mark_fb_busy(struct drm_device *dev,
8857                                unsigned frontbuffer_bits,
8858                                struct intel_engine_cs *ring)
8859 {
8860         enum pipe pipe;
8861
8862         if (!i915.powersave)
8863                 return;
8864
8865         for_each_pipe(pipe) {
8866                 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
8867                         continue;
8868
8869                 intel_increase_pllclock(dev, pipe);
8870                 if (ring && intel_fbc_enabled(dev))
8871                         ring->fbc_dirty = true;
8872         }
8873 }
8874
8875 /**
8876  * intel_fb_obj_invalidate - invalidate frontbuffer object
8877  * @obj: GEM object to invalidate
8878  * @ring: set for asynchronous rendering
8879  *
8880  * This function gets called every time rendering on the given object starts and
8881  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
8882  * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
8883  * until the rendering completes or a flip on this frontbuffer plane is
8884  * scheduled.
8885  */
8886 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
8887                              struct intel_engine_cs *ring)
8888 {
8889         struct drm_device *dev = obj->base.dev;
8890         struct drm_i915_private *dev_priv = dev->dev_private;
8891
8892         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8893
8894         if (!obj->frontbuffer_bits)
8895                 return;
8896
8897         if (ring) {
8898                 mutex_lock(&dev_priv->fb_tracking.lock);
8899                 dev_priv->fb_tracking.busy_bits
8900                         |= obj->frontbuffer_bits;
8901                 dev_priv->fb_tracking.flip_bits
8902                         &= ~obj->frontbuffer_bits;
8903                 mutex_unlock(&dev_priv->fb_tracking.lock);
8904         }
8905
8906         intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
8907
8908         intel_edp_psr_exit(dev);
8909 }
8910
8911 /**
8912  * intel_frontbuffer_flush - flush frontbuffer
8913  * @dev: DRM device
8914  * @frontbuffer_bits: frontbuffer plane tracking bits
8915  *
8916  * This function gets called every time rendering on the given planes has
8917  * completed and frontbuffer caching can be started again. Flushes will get
8918  * delayed if they're blocked by some outstanding asynchronous rendering.
8919  *
8920  * Can be called without any locks held.
8921  */
8922 void intel_frontbuffer_flush(struct drm_device *dev,
8923                              unsigned frontbuffer_bits)
8924 {
8925         struct drm_i915_private *dev_priv = dev->dev_private;
8926
8927         /* Delay flushing when rings are still busy.*/
8928         mutex_lock(&dev_priv->fb_tracking.lock);
8929         frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
8930         mutex_unlock(&dev_priv->fb_tracking.lock);
8931
8932         intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
8933
8934         intel_edp_psr_exit(dev);
8935 }
8936
8937 /**
8938  * intel_fb_obj_flush - flush frontbuffer object
8939  * @obj: GEM object to flush
8940  * @retire: set when retiring asynchronous rendering
8941  *
8942  * This function gets called every time rendering on the given object has
8943  * completed and frontbuffer caching can be started again. If @retire is true
8944  * then any delayed flushes will be unblocked.
8945  */
8946 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
8947                         bool retire)
8948 {
8949         struct drm_device *dev = obj->base.dev;
8950         struct drm_i915_private *dev_priv = dev->dev_private;
8951         unsigned frontbuffer_bits;
8952
8953         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8954
8955         if (!obj->frontbuffer_bits)
8956                 return;
8957
8958         frontbuffer_bits = obj->frontbuffer_bits;
8959
8960         if (retire) {
8961                 mutex_lock(&dev_priv->fb_tracking.lock);
8962                 /* Filter out new bits since rendering started. */
8963                 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
8964
8965                 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
8966                 mutex_unlock(&dev_priv->fb_tracking.lock);
8967         }
8968
8969         intel_frontbuffer_flush(dev, frontbuffer_bits);
8970 }
8971
8972 /**
8973  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
8974  * @dev: DRM device
8975  * @frontbuffer_bits: frontbuffer plane tracking bits
8976  *
8977  * This function gets called after scheduling a flip on the given planes. The actual
8978  * frontbuffer flushing will be delayed until completion is signalled with
8979  * intel_frontbuffer_flip_complete. If an invalidate happens in between this
8980  * flush will be cancelled.
8981  *
8982  * Can be called without any locks held.
8983  */
8984 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
8985                                     unsigned frontbuffer_bits)
8986 {
8987         struct drm_i915_private *dev_priv = dev->dev_private;
8988
8989         mutex_lock(&dev_priv->fb_tracking.lock);
8990         dev_priv->fb_tracking.flip_bits
8991                 |= frontbuffer_bits;
8992         mutex_unlock(&dev_priv->fb_tracking.lock);
8993 }
8994
8995 /**
8996  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
8997  * @dev: DRM device
8998  * @frontbuffer_bits: frontbuffer plane tracking bits
8999  *
9000  * This function gets called after the flip has been latched and will complete
9001  * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
9002  *
9003  * Can be called without any locks held.
9004  */
9005 void intel_frontbuffer_flip_complete(struct drm_device *dev,
9006                                      unsigned frontbuffer_bits)
9007 {
9008         struct drm_i915_private *dev_priv = dev->dev_private;
9009
9010         mutex_lock(&dev_priv->fb_tracking.lock);
9011         /* Mask any cancelled flips. */
9012         frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
9013         dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
9014         mutex_unlock(&dev_priv->fb_tracking.lock);
9015
9016         intel_frontbuffer_flush(dev, frontbuffer_bits);
9017 }
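
/*
 * Illustrative sketch (not part of the driver): the expected ordering of the
 * frontbuffer tracking calls above for one page flip of the primary plane on
 * @pipe. The object and device pointers are placeholders; the real callers
 * are the GEM execbuf/retire paths and the flip code further down in this
 * file.
 */
static void __maybe_unused
example_frontbuffer_flip_tracking(struct drm_device *dev,
                                  struct drm_i915_gem_object *obj,
                                  enum pipe pipe)
{
        /* 1) Rendering into the new frontbuffer starts: stop caching. */
        mutex_lock(&dev->struct_mutex);
        intel_fb_obj_invalidate(obj, obj->ring);
        mutex_unlock(&dev->struct_mutex);

        /* 2) The flip is queued: delay any flush until it is latched. */
        intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

        /* 3) The flip completes on vblank: run the delayed flush. */
        intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
}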
9018
9019 static void intel_crtc_destroy(struct drm_crtc *crtc)
9020 {
9021         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9022         struct drm_device *dev = crtc->dev;
9023         struct intel_unpin_work *work;
9024         unsigned long flags;
9025
9026         spin_lock_irqsave(&dev->event_lock, flags);
9027         work = intel_crtc->unpin_work;
9028         intel_crtc->unpin_work = NULL;
9029         spin_unlock_irqrestore(&dev->event_lock, flags);
9030
9031         if (work) {
9032                 cancel_work_sync(&work->work);
9033                 kfree(work);
9034         }
9035
9036         drm_crtc_cleanup(crtc);
9037
9038         kfree(intel_crtc);
9039 }
9040
9041 static void intel_unpin_work_fn(struct work_struct *__work)
9042 {
9043         struct intel_unpin_work *work =
9044                 container_of(__work, struct intel_unpin_work, work);
9045         struct drm_device *dev = work->crtc->dev;
9046         enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
9047
9048         mutex_lock(&dev->struct_mutex);
9049         intel_unpin_fb_obj(work->old_fb_obj);
9050         drm_gem_object_unreference(&work->pending_flip_obj->base);
9051         drm_gem_object_unreference(&work->old_fb_obj->base);
9052
9053         intel_update_fbc(dev);
9054         mutex_unlock(&dev->struct_mutex);
9055
9056         intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9057
9058         BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
9059         atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
9060
9061         kfree(work);
9062 }
9063
9064 static void do_intel_finish_page_flip(struct drm_device *dev,
9065                                       struct drm_crtc *crtc)
9066 {
9067         struct drm_i915_private *dev_priv = dev->dev_private;
9068         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9069         struct intel_unpin_work *work;
9070         unsigned long flags;
9071
9072         /* Ignore early vblank irqs */
9073         if (intel_crtc == NULL)
9074                 return;
9075
9076         spin_lock_irqsave(&dev->event_lock, flags);
9077         work = intel_crtc->unpin_work;
9078
9079         /* Ensure we don't miss a work->pending update ... */
9080         smp_rmb();
9081
9082         if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
9083                 spin_unlock_irqrestore(&dev->event_lock, flags);
9084                 return;
9085         }
9086
9087         /* and that the unpin work is consistent wrt ->pending. */
9088         smp_rmb();
9089
9090         intel_crtc->unpin_work = NULL;
9091
9092         if (work->event)
9093                 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
9094
9095         drm_crtc_vblank_put(crtc);
9096
9097         spin_unlock_irqrestore(&dev->event_lock, flags);
9098
9099         wake_up_all(&dev_priv->pending_flip_queue);
9100
9101         queue_work(dev_priv->wq, &work->work);
9102
9103         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
9104 }
9105
9106 void intel_finish_page_flip(struct drm_device *dev, int pipe)
9107 {
9108         struct drm_i915_private *dev_priv = dev->dev_private;
9109         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9110
9111         do_intel_finish_page_flip(dev, crtc);
9112 }
9113
9114 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
9115 {
9116         struct drm_i915_private *dev_priv = dev->dev_private;
9117         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
9118
9119         do_intel_finish_page_flip(dev, crtc);
9120 }
9121
9122 /* Is 'a' after or equal to 'b'? */
9123 static bool g4x_flip_count_after_eq(u32 a, u32 b)
9124 {
9125         return !((a - b) & 0x80000000);
9126 }
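
/*
 * Worked example for the wrap-safe comparison above: with a = 0x00000002 and
 * b = 0xfffffffe the counter has wrapped, a - b = 4 has the top bit clear and
 * 'a' is correctly treated as after 'b'; with a = 0 and b = 5, a - b =
 * 0xfffffffb has the top bit set and the helper returns false.
 */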
9127
9128 static bool page_flip_finished(struct intel_crtc *crtc)
9129 {
9130         struct drm_device *dev = crtc->base.dev;
9131         struct drm_i915_private *dev_priv = dev->dev_private;
9132
9133         /*
9134          * The relevant registers don't exist on pre-ctg.
9135          * As the flip done interrupt doesn't trigger for mmio
9136          * flips on gmch platforms, a flip count check isn't
9137          * really needed there. But since ctg has the registers,
9138          * include it in the check anyway.
9139          */
9140         if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
9141                 return true;
9142
9143         /*
9144          * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
9145          * used the same base address. In that case the mmio flip might
9146          * have completed, but the CS hasn't even executed the flip yet.
9147          *
9148          * A flip count check isn't enough as the CS might have updated
9149          * the base address just after start of vblank, but before we
9150          * managed to process the interrupt. This means we'd complete the
9151          * CS flip too soon.
9152          *
9153          * Combining both checks should get us a good enough result. It may
9154          * still happen that the CS flip has been executed, but has not
9155          * yet actually completed. But in case the base address is the same
9156          * anyway, we don't really care.
9157          */
9158         return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
9159                 crtc->unpin_work->gtt_offset &&
9160                 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
9161                                     crtc->unpin_work->flip_count);
9162 }
9163
9164 void intel_prepare_page_flip(struct drm_device *dev, int plane)
9165 {
9166         struct drm_i915_private *dev_priv = dev->dev_private;
9167         struct intel_crtc *intel_crtc =
9168                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9169         unsigned long flags;
9170
9171         /* NB: An MMIO update of the plane base pointer will also
9172          * generate a page-flip completion irq, i.e. every modeset
9173          * is also accompanied by a spurious intel_prepare_page_flip().
9174          */
9175         spin_lock_irqsave(&dev->event_lock, flags);
9176         if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
9177                 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
9178         spin_unlock_irqrestore(&dev->event_lock, flags);
9179 }
9180
9181 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
9182 {
9183         /* Ensure that the work item is consistent when activating it ... */
9184         smp_wmb();
9185         atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
9186         /* and that it is marked active as soon as the irq could fire. */
9187         smp_wmb();
9188 }
9189
9190 static int intel_gen2_queue_flip(struct drm_device *dev,
9191                                  struct drm_crtc *crtc,
9192                                  struct drm_framebuffer *fb,
9193                                  struct drm_i915_gem_object *obj,
9194                                  struct intel_engine_cs *ring,
9195                                  uint32_t flags)
9196 {
9197         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9198         u32 flip_mask;
9199         int ret;
9200
9201         ret = intel_ring_begin(ring, 6);
9202         if (ret)
9203                 return ret;
9204
9205         /* Can't queue multiple flips, so wait for the previous
9206          * one to finish before executing the next.
9207          */
9208         if (intel_crtc->plane)
9209                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9210         else
9211                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9212         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9213         intel_ring_emit(ring, MI_NOOP);
9214         intel_ring_emit(ring, MI_DISPLAY_FLIP |
9215                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9216         intel_ring_emit(ring, fb->pitches[0]);
9217         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9218         intel_ring_emit(ring, 0); /* aux display base address, unused */
9219
9220         intel_mark_page_flip_active(intel_crtc);
9221         __intel_ring_advance(ring);
9222         return 0;
9223 }
9224
9225 static int intel_gen3_queue_flip(struct drm_device *dev,
9226                                  struct drm_crtc *crtc,
9227                                  struct drm_framebuffer *fb,
9228                                  struct drm_i915_gem_object *obj,
9229                                  struct intel_engine_cs *ring,
9230                                  uint32_t flags)
9231 {
9232         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9233         u32 flip_mask;
9234         int ret;
9235
9236         ret = intel_ring_begin(ring, 6);
9237         if (ret)
9238                 return ret;
9239
9240         if (intel_crtc->plane)
9241                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
9242         else
9243                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
9244         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
9245         intel_ring_emit(ring, MI_NOOP);
9246         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
9247                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9248         intel_ring_emit(ring, fb->pitches[0]);
9249         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9250         intel_ring_emit(ring, MI_NOOP);
9251
9252         intel_mark_page_flip_active(intel_crtc);
9253         __intel_ring_advance(ring);
9254         return 0;
9255 }
9256
9257 static int intel_gen4_queue_flip(struct drm_device *dev,
9258                                  struct drm_crtc *crtc,
9259                                  struct drm_framebuffer *fb,
9260                                  struct drm_i915_gem_object *obj,
9261                                  struct intel_engine_cs *ring,
9262                                  uint32_t flags)
9263 {
9264         struct drm_i915_private *dev_priv = dev->dev_private;
9265         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9266         uint32_t pf, pipesrc;
9267         int ret;
9268
9269         ret = intel_ring_begin(ring, 4);
9270         if (ret)
9271                 return ret;
9272
9273         /* i965+ uses the linear or tiled offsets from the
9274          * Display Registers (which do not change across a page-flip)
9275          * so we need only reprogram the base address.
9276          */
9277         intel_ring_emit(ring, MI_DISPLAY_FLIP |
9278                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9279         intel_ring_emit(ring, fb->pitches[0]);
9280         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
9281                         obj->tiling_mode);
9282
9283         /* XXX Enabling the panel-fitter across page-flip is so far
9284          * untested on non-native modes, so ignore it for now.
9285          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
9286          */
9287         pf = 0;
9288         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9289         intel_ring_emit(ring, pf | pipesrc);
9290
9291         intel_mark_page_flip_active(intel_crtc);
9292         __intel_ring_advance(ring);
9293         return 0;
9294 }
9295
9296 static int intel_gen6_queue_flip(struct drm_device *dev,
9297                                  struct drm_crtc *crtc,
9298                                  struct drm_framebuffer *fb,
9299                                  struct drm_i915_gem_object *obj,
9300                                  struct intel_engine_cs *ring,
9301                                  uint32_t flags)
9302 {
9303         struct drm_i915_private *dev_priv = dev->dev_private;
9304         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9305         uint32_t pf, pipesrc;
9306         int ret;
9307
9308         ret = intel_ring_begin(ring, 4);
9309         if (ret)
9310                 return ret;
9311
9312         intel_ring_emit(ring, MI_DISPLAY_FLIP |
9313                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
9314         intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
9315         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9316
9317         /* Contrary to the suggestions in the documentation,
9318          * "Enable Panel Fitter" does not seem to be required when page
9319          * flipping with a non-native mode, and worse causes a normal
9320          * modeset to fail.
9321          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
9322          */
9323         pf = 0;
9324         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
9325         intel_ring_emit(ring, pf | pipesrc);
9326
9327         intel_mark_page_flip_active(intel_crtc);
9328         __intel_ring_advance(ring);
9329         return 0;
9330 }
9331
9332 static int intel_gen7_queue_flip(struct drm_device *dev,
9333                                  struct drm_crtc *crtc,
9334                                  struct drm_framebuffer *fb,
9335                                  struct drm_i915_gem_object *obj,
9336                                  struct intel_engine_cs *ring,
9337                                  uint32_t flags)
9338 {
9339         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9340         uint32_t plane_bit = 0;
9341         int len, ret;
9342
9343         switch (intel_crtc->plane) {
9344         case PLANE_A:
9345                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
9346                 break;
9347         case PLANE_B:
9348                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
9349                 break;
9350         case PLANE_C:
9351                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
9352                 break;
9353         default:
9354                 WARN_ONCE(1, "unknown plane in flip command\n");
9355                 return -ENODEV;
9356         }
9357
9358         len = 4;
9359         if (ring->id == RCS) {
9360                 len += 6;
9361                 /*
9362                  * On Gen 8, SRM is now taking an extra dword to accommodate
9363                  * 48-bit addresses, and we need a NOOP for the batch size to
9364                  * stay even.
9365                  */
9366                 if (IS_GEN8(dev))
9367                         len += 2;
9368         }
9369
9370         /*
9371          * BSpec MI_DISPLAY_FLIP for IVB:
9372          * "The full packet must be contained within the same cache line."
9373          *
9374          * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
9375          * cacheline, if we ever start emitting more commands before
9376          * the MI_DISPLAY_FLIP we may need to first emit everything else,
9377          * then do the cacheline alignment, and finally emit the
9378          * MI_DISPLAY_FLIP.
9379          */
9380         ret = intel_ring_cacheline_align(ring);
9381         if (ret)
9382                 return ret;
9383
9384         ret = intel_ring_begin(ring, len);
9385         if (ret)
9386                 return ret;
9387
9388         /* Unmask the flip-done completion message. Note that the bspec says that
9389          * we should do this for both the BCS and RCS, and that we must not unmask
9390          * more than one flip event at any time (or ensure that one flip message
9391          * can be sent by waiting for flip-done prior to queueing new flips).
9392          * Experimentation says that BCS works despite DERRMR masking all
9393          * flip-done completion events and that unmasking all planes at once
9394          * for the RCS also doesn't appear to drop events. Setting the DERRMR
9395          * to zero does lead to lockups within MI_DISPLAY_FLIP.
9396          */
9397         if (ring->id == RCS) {
9398                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9399                 intel_ring_emit(ring, DERRMR);
9400                 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9401                                         DERRMR_PIPEB_PRI_FLIP_DONE |
9402                                         DERRMR_PIPEC_PRI_FLIP_DONE));
9403                 if (IS_GEN8(dev))
9404                         intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9405                                               MI_SRM_LRM_GLOBAL_GTT);
9406                 else
9407                         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
9408                                               MI_SRM_LRM_GLOBAL_GTT);
9409                 intel_ring_emit(ring, DERRMR);
9410                 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9411                 if (IS_GEN8(dev)) {
9412                         intel_ring_emit(ring, 0);
9413                         intel_ring_emit(ring, MI_NOOP);
9414                 }
9415         }
9416
9417         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
9418         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
9419         intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9420         intel_ring_emit(ring, (MI_NOOP));
9421
9422         intel_mark_page_flip_active(intel_crtc);
9423         __intel_ring_advance(ring);
9424         return 0;
9425 }
9426
9427 static bool use_mmio_flip(struct intel_engine_cs *ring,
9428                           struct drm_i915_gem_object *obj)
9429 {
9430         /*
9431          * MMIO flips are not used on older platforms: the lack of a
9432          * flip-done interrupt forces us to use CS flips there, and those
9433          * platforms derive flip completion from the flip_pending status
9434          * bits and vblank irqs, a mechanism that MMIO flips would
9435          * disrupt.
9436          */
9437
9438         if (INTEL_INFO(ring->dev)->gen < 5)
9439                 return false;
9440
9441         if (i915.use_mmio_flip < 0)
9442                 return false;
9443         else if (i915.use_mmio_flip > 0)
9444                 return true;
9445         else
9446                 return ring != obj->ring;
9447 }
9448
9449 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
9450 {
9451         struct drm_device *dev = intel_crtc->base.dev;
9452         struct drm_i915_private *dev_priv = dev->dev_private;
9453         struct intel_framebuffer *intel_fb =
9454                 to_intel_framebuffer(intel_crtc->base.primary->fb);
9455         struct drm_i915_gem_object *obj = intel_fb->obj;
9456         u32 dspcntr;
9457         u32 reg;
9458
9459         intel_mark_page_flip_active(intel_crtc);
9460
9461         reg = DSPCNTR(intel_crtc->plane);
9462         dspcntr = I915_READ(reg);
9463
9464         if (INTEL_INFO(dev)->gen >= 4) {
9465                 if (obj->tiling_mode != I915_TILING_NONE)
9466                         dspcntr |= DISPPLANE_TILED;
9467                 else
9468                         dspcntr &= ~DISPPLANE_TILED;
9469         }
9470         I915_WRITE(reg, dspcntr);
9471
9472         I915_WRITE(DSPSURF(intel_crtc->plane),
9473                    intel_crtc->unpin_work->gtt_offset);
9474         POSTING_READ(DSPSURF(intel_crtc->plane));
9475 }
9476
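/*
 * Returns 1 if the flip must be postponed until @obj's pending GPU write has
 * completed (in which case a ring irq reference is held for the waiter), 0 if
 * the flip can be performed immediately, or a negative error code.
 */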
9477 static int intel_postpone_flip(struct drm_i915_gem_object *obj)
9478 {
9479         struct intel_engine_cs *ring;
9480         int ret;
9481
9482         lockdep_assert_held(&obj->base.dev->struct_mutex);
9483
9484         if (!obj->last_write_seqno)
9485                 return 0;
9486
9487         ring = obj->ring;
9488
9489         if (i915_seqno_passed(ring->get_seqno(ring, true),
9490                               obj->last_write_seqno))
9491                 return 0;
9492
9493         ret = i915_gem_check_olr(ring, obj->last_write_seqno);
9494         if (ret)
9495                 return ret;
9496
9497         if (WARN_ON(!ring->irq_get(ring)))
9498                 return 0;
9499
9500         return 1;
9501 }
9502
9503 void intel_notify_mmio_flip(struct intel_engine_cs *ring)
9504 {
9505         struct drm_i915_private *dev_priv = to_i915(ring->dev);
9506         struct intel_crtc *intel_crtc;
9507         unsigned long irq_flags;
9508         u32 seqno;
9509
9510         seqno = ring->get_seqno(ring, false);
9511
9512         spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
9513         for_each_intel_crtc(ring->dev, intel_crtc) {
9514                 struct intel_mmio_flip *mmio_flip;
9515
9516                 mmio_flip = &intel_crtc->mmio_flip;
9517                 if (mmio_flip->seqno == 0)
9518                         continue;
9519
9520                 if (ring->id != mmio_flip->ring_id)
9521                         continue;
9522
9523                 if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
9524                         intel_do_mmio_flip(intel_crtc);
9525                         mmio_flip->seqno = 0;
9526                         ring->irq_put(ring);
9527                 }
9528         }
9529         spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
9530 }
9531
9532 static int intel_queue_mmio_flip(struct drm_device *dev,
9533                                  struct drm_crtc *crtc,
9534                                  struct drm_framebuffer *fb,
9535                                  struct drm_i915_gem_object *obj,
9536                                  struct intel_engine_cs *ring,
9537                                  uint32_t flags)
9538 {
9539         struct drm_i915_private *dev_priv = dev->dev_private;
9540         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9541         unsigned long irq_flags;
9542         int ret;
9543
9544         if (WARN_ON(intel_crtc->mmio_flip.seqno))
9545                 return -EBUSY;
9546
9547         ret = intel_postpone_flip(obj);
9548         if (ret < 0)
9549                 return ret;
9550         if (ret == 0) {
9551                 intel_do_mmio_flip(intel_crtc);
9552                 return 0;
9553         }
9554
9555         spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
9556         intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
9557         intel_crtc->mmio_flip.ring_id = obj->ring->id;
9558         spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
9559
9560         /*
9561          * Double check to catch cases where irq fired before
9562          * mmio flip data was ready
9563          */
9564         intel_notify_mmio_flip(obj->ring);
9565         return 0;
9566 }
9567
9568 static int intel_default_queue_flip(struct drm_device *dev,
9569                                     struct drm_crtc *crtc,
9570                                     struct drm_framebuffer *fb,
9571                                     struct drm_i915_gem_object *obj,
9572                                     struct intel_engine_cs *ring,
9573                                     uint32_t flags)
9574 {
9575         return -ENODEV;
9576 }
9577
9578 static int intel_crtc_page_flip(struct drm_crtc *crtc,
9579                                 struct drm_framebuffer *fb,
9580                                 struct drm_pending_vblank_event *event,
9581                                 uint32_t page_flip_flags)
9582 {
9583         struct drm_device *dev = crtc->dev;
9584         struct drm_i915_private *dev_priv = dev->dev_private;
9585         struct drm_framebuffer *old_fb = crtc->primary->fb;
9586         struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
9587         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9588         enum pipe pipe = intel_crtc->pipe;
9589         struct intel_unpin_work *work;
9590         struct intel_engine_cs *ring;
9591         unsigned long flags;
9592         int ret;
9593
9594         /* Can't change pixel format via MI display flips. */
9595         if (fb->pixel_format != crtc->primary->fb->pixel_format)
9596                 return -EINVAL;
9597
9598         /*
9599          * TILEOFF/LINOFF registers can't be changed via MI display flips.
9600          * Note that pitch changes could also affect these registers.
9601          */
9602         if (INTEL_INFO(dev)->gen > 3 &&
9603             (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
9604              fb->pitches[0] != crtc->primary->fb->pitches[0]))
9605                 return -EINVAL;
9606
9607         if (i915_terminally_wedged(&dev_priv->gpu_error))
9608                 goto out_hang;
9609
9610         work = kzalloc(sizeof(*work), GFP_KERNEL);
9611         if (work == NULL)
9612                 return -ENOMEM;
9613
9614         work->event = event;
9615         work->crtc = crtc;
9616         work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
9617         INIT_WORK(&work->work, intel_unpin_work_fn);
9618
9619         ret = drm_crtc_vblank_get(crtc);
9620         if (ret)
9621                 goto free_work;
9622
9623         /* We borrow the event spin lock for protecting unpin_work */
9624         spin_lock_irqsave(&dev->event_lock, flags);
9625         if (intel_crtc->unpin_work) {
9626                 spin_unlock_irqrestore(&dev->event_lock, flags);
9627                 kfree(work);
9628                 drm_crtc_vblank_put(crtc);
9629
9630                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9631                 return -EBUSY;
9632         }
9633         intel_crtc->unpin_work = work;
9634         spin_unlock_irqrestore(&dev->event_lock, flags);
9635
9636         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9637                 flush_workqueue(dev_priv->wq);
9638
9639         ret = i915_mutex_lock_interruptible(dev);
9640         if (ret)
9641                 goto cleanup;
9642
9643         /* Reference the objects for the scheduled work. */
9644         drm_gem_object_reference(&work->old_fb_obj->base);
9645         drm_gem_object_reference(&obj->base);
9646
9647         crtc->primary->fb = fb;
9648
9649         work->pending_flip_obj = obj;
9650
9651         work->enable_stall_check = true;
9652
9653         atomic_inc(&intel_crtc->unpin_work_count);
9654         intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
9655
9656         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
9657                 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
9658
9659         if (IS_VALLEYVIEW(dev)) {
9660                 ring = &dev_priv->ring[BCS];
9661         } else if (INTEL_INFO(dev)->gen >= 7) {
9662                 ring = obj->ring;
9663                 if (ring == NULL || ring->id != RCS)
9664                         ring = &dev_priv->ring[BCS];
9665         } else {
9666                 ring = &dev_priv->ring[RCS];
9667         }
9668
9669         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
9670         if (ret)
9671                 goto cleanup_pending;
9672
9673         work->gtt_offset =
9674                 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
9675
9676         if (use_mmio_flip(ring, obj))
9677                 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
9678                                             page_flip_flags);
9679         else
9680                 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
9681                                 page_flip_flags);
9682         if (ret)
9683                 goto cleanup_unpin;
9684
9685         i915_gem_track_fb(work->old_fb_obj, obj,
9686                           INTEL_FRONTBUFFER_PRIMARY(pipe));
9687
9688         intel_disable_fbc(dev);
9689         intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
9690         mutex_unlock(&dev->struct_mutex);
9691
9692         trace_i915_flip_request(intel_crtc->plane, obj);
9693
9694         return 0;
9695
9696 cleanup_unpin:
9697         intel_unpin_fb_obj(obj);
9698 cleanup_pending:
9699         atomic_dec(&intel_crtc->unpin_work_count);
9700         crtc->primary->fb = old_fb;
9701         drm_gem_object_unreference(&work->old_fb_obj->base);
9702         drm_gem_object_unreference(&obj->base);
9703         mutex_unlock(&dev->struct_mutex);
9704
9705 cleanup:
9706         spin_lock_irqsave(&dev->event_lock, flags);
9707         intel_crtc->unpin_work = NULL;
9708         spin_unlock_irqrestore(&dev->event_lock, flags);
9709
9710         drm_crtc_vblank_put(crtc);
9711 free_work:
9712         kfree(work);
9713
9714         if (ret == -EIO) {
9715 out_hang:
9716                 intel_crtc_wait_for_pending_flips(crtc);
9717                 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
9718                 if (ret == 0 && event)
9719                         drm_send_vblank_event(dev, pipe, event);
9720         }
9721         return ret;
9722 }
9723
9724 static struct drm_crtc_helper_funcs intel_helper_funcs = {
9725         .mode_set_base_atomic = intel_pipe_set_base_atomic,
9726         .load_lut = intel_crtc_load_lut,
9727 };
9728
9729 /**
9730  * intel_modeset_update_staged_output_state
9731  *
9732  * Updates the staged output configuration state, e.g. after we've read out the
9733  * current hw state.
9734  */
9735 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9736 {
9737         struct intel_crtc *crtc;
9738         struct intel_encoder *encoder;
9739         struct intel_connector *connector;
9740
9741         list_for_each_entry(connector, &dev->mode_config.connector_list,
9742                             base.head) {
9743                 connector->new_encoder =
9744                         to_intel_encoder(connector->base.encoder);
9745         }
9746
9747         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9748                             base.head) {
9749                 encoder->new_crtc =
9750                         to_intel_crtc(encoder->base.crtc);
9751         }
9752
9753         for_each_intel_crtc(dev, crtc) {
9754                 crtc->new_enabled = crtc->base.enabled;
9755
9756                 if (crtc->new_enabled)
9757                         crtc->new_config = &crtc->config;
9758                 else
9759                         crtc->new_config = NULL;
9760         }
9761 }
9762
9763 /**
9764  * intel_modeset_commit_output_state
9765  *
9766  * This function copies the stage display pipe configuration to the real one.
9767  */
9768 static void intel_modeset_commit_output_state(struct drm_device *dev)
9769 {
9770         struct intel_crtc *crtc;
9771         struct intel_encoder *encoder;
9772         struct intel_connector *connector;
9773
9774         list_for_each_entry(connector, &dev->mode_config.connector_list,
9775                             base.head) {
9776                 connector->base.encoder = &connector->new_encoder->base;
9777         }
9778
9779         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9780                             base.head) {
9781                 encoder->base.crtc = &encoder->new_crtc->base;
9782         }
9783
9784         for_each_intel_crtc(dev, crtc) {
9785                 crtc->base.enabled = crtc->new_enabled;
9786         }
9787 }
9788
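/*
 * Clamp pipe_config->pipe_bpp to what the attached sink reports: the EDID
 * bpc value (times three colour channels), or a conservative 24 bpp when
 * the EDID provides no bpc information at all.
 */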
9789 static void
9790 connected_sink_compute_bpp(struct intel_connector *connector,
9791                            struct intel_crtc_config *pipe_config)
9792 {
9793         int bpp = pipe_config->pipe_bpp;
9794
9795         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
9796                 connector->base.base.id,
9797                 connector->base.name);
9798
9799         /* Don't use an invalid EDID bpc value */
9800         if (connector->base.display_info.bpc &&
9801             connector->base.display_info.bpc * 3 < bpp) {
9802                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
9803                               bpp, connector->base.display_info.bpc*3);
9804                 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
9805         }
9806
9807         /* Clamp bpp to 8 on screens without EDID 1.4 */
9808         if (connector->base.display_info.bpc == 0 && bpp > 24) {
9809                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
9810                               bpp);
9811                 pipe_config->pipe_bpp = 24;
9812         }
9813 }
9814
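/*
 * Pick a starting pipe_bpp from the framebuffer pixel format, rejecting
 * formats the hardware generation cannot scan out, then clamp it against
 * every connector that is staged on this crtc. Returns the plane bpp on
 * success or a negative error code.
 */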
9815 static int
9816 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
9817                           struct drm_framebuffer *fb,
9818                           struct intel_crtc_config *pipe_config)
9819 {
9820         struct drm_device *dev = crtc->base.dev;
9821         struct intel_connector *connector;
9822         int bpp;
9823
9824         switch (fb->pixel_format) {
9825         case DRM_FORMAT_C8:
9826                 bpp = 8*3; /* since we go through a colormap */
9827                 break;
9828         case DRM_FORMAT_XRGB1555:
9829         case DRM_FORMAT_ARGB1555:
9830                 /* checked in intel_framebuffer_init already */
9831                 if (WARN_ON(INTEL_INFO(dev)->gen > 3))
9832                         return -EINVAL;
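                /* fall through */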
9833         case DRM_FORMAT_RGB565:
9834                 bpp = 6*3; /* min is 18bpp */
9835                 break;
9836         case DRM_FORMAT_XBGR8888:
9837         case DRM_FORMAT_ABGR8888:
9838                 /* checked in intel_framebuffer_init already */
9839                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9840                         return -EINVAL;
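                /* fall through */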
9841         case DRM_FORMAT_XRGB8888:
9842         case DRM_FORMAT_ARGB8888:
9843                 bpp = 8*3;
9844                 break;
9845         case DRM_FORMAT_XRGB2101010:
9846         case DRM_FORMAT_ARGB2101010:
9847         case DRM_FORMAT_XBGR2101010:
9848         case DRM_FORMAT_ABGR2101010:
9849                 /* checked in intel_framebuffer_init already */
9850                 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
9851                         return -EINVAL;
9852                 bpp = 10*3;
9853                 break;
9854         /* TODO: gen4+ supports 16 bpc floating point, too. */
9855         default:
9856                 DRM_DEBUG_KMS("unsupported depth\n");
9857                 return -EINVAL;
9858         }
9859
9860         pipe_config->pipe_bpp = bpp;
9861
9862         /* Clamp display bpp to EDID value */
9863         list_for_each_entry(connector, &dev->mode_config.connector_list,
9864                             base.head) {
9865                 if (!connector->new_encoder ||
9866                     connector->new_encoder->new_crtc != crtc)
9867                         continue;
9868
9869                 connected_sink_compute_bpp(connector, pipe_config);
9870         }
9871
9872         return bpp;
9873 }
9874
9875 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
9876 {
9877         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
9878                         "type: 0x%x flags: 0x%x\n",
9879                 mode->crtc_clock,
9880                 mode->crtc_hdisplay, mode->crtc_hsync_start,
9881                 mode->crtc_hsync_end, mode->crtc_htotal,
9882                 mode->crtc_vdisplay, mode->crtc_vsync_start,
9883                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
9884 }
9885
9886 static void intel_dump_pipe_config(struct intel_crtc *crtc,
9887                                    struct intel_crtc_config *pipe_config,
9888                                    const char *context)
9889 {
9890         DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
9891                       context, pipe_name(crtc->pipe));
9892
9893         DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
9894         DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
9895                       pipe_config->pipe_bpp, pipe_config->dither);
9896         DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9897                       pipe_config->has_pch_encoder,
9898                       pipe_config->fdi_lanes,
9899                       pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
9900                       pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
9901                       pipe_config->fdi_m_n.tu);
9902         DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
9903                       pipe_config->has_dp_encoder,
9904                       pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
9905                       pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
9906                       pipe_config->dp_m_n.tu);
9907         DRM_DEBUG_KMS("requested mode:\n");
9908         drm_mode_debug_printmodeline(&pipe_config->requested_mode);
9909         DRM_DEBUG_KMS("adjusted mode:\n");
9910         drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
9911         intel_dump_crtc_timings(&pipe_config->adjusted_mode);
9912         DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
9913         DRM_DEBUG_KMS("pipe src size: %dx%d\n",
9914                       pipe_config->pipe_src_w, pipe_config->pipe_src_h);
9915         DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
9916                       pipe_config->gmch_pfit.control,
9917                       pipe_config->gmch_pfit.pgm_ratios,
9918                       pipe_config->gmch_pfit.lvds_border_bits);
9919         DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
9920                       pipe_config->pch_pfit.pos,
9921                       pipe_config->pch_pfit.size,
9922                       pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
9923         DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
9924         DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
9925 }
9926
9927 static bool encoders_cloneable(const struct intel_encoder *a,
9928                                const struct intel_encoder *b)
9929 {
9930         /* masks could be asymmetric, so check both ways */
9931         return a == b || (a->cloneable & (1 << b->type) &&
9932                           b->cloneable & (1 << a->type));
9933 }
9934
9935 static bool check_single_encoder_cloning(struct intel_crtc *crtc,
9936                                          struct intel_encoder *encoder)
9937 {
9938         struct drm_device *dev = crtc->base.dev;
9939         struct intel_encoder *source_encoder;
9940
9941         list_for_each_entry(source_encoder,
9942                             &dev->mode_config.encoder_list, base.head) {
9943                 if (source_encoder->new_crtc != crtc)
9944                         continue;
9945
9946                 if (!encoders_cloneable(encoder, source_encoder))
9947                         return false;
9948         }
9949
9950         return true;
9951 }
9952
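/*
 * Check that every encoder staged on this crtc can be cloned with every
 * other staged encoder, i.e. that the requested output combination is a
 * valid cloning configuration.
 */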
9953 static bool check_encoder_cloning(struct intel_crtc *crtc)
9954 {
9955         struct drm_device *dev = crtc->base.dev;
9956         struct intel_encoder *encoder;
9957
9958         list_for_each_entry(encoder,
9959                             &dev->mode_config.encoder_list, base.head) {
9960                 if (encoder->new_crtc != crtc)
9961                         continue;
9962
9963                 if (!check_single_encoder_cloning(crtc, encoder))
9964                         return false;
9965         }
9966
9967         return true;
9968 }
9969
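/*
 * Compute a new pipe configuration for the given crtc from the requested
 * mode, framebuffer and the staged encoders. Encoders and the crtc may
 * adjust the mode; if the crtc asks for a retry (e.g. because it is
 * bandwidth constrained) the encoder loop is run one more time. Returns
 * an ERR_PTR on failure.
 */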
9970 static struct intel_crtc_config *
9971 intel_modeset_pipe_config(struct drm_crtc *crtc,
9972                           struct drm_framebuffer *fb,
9973                           struct drm_display_mode *mode)
9974 {
9975         struct drm_device *dev = crtc->dev;
9976         struct intel_encoder *encoder;
9977         struct intel_crtc_config *pipe_config;
9978         int plane_bpp, ret = -EINVAL;
9979         bool retry = true;
9980
9981         if (!check_encoder_cloning(to_intel_crtc(crtc))) {
9982                 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
9983                 return ERR_PTR(-EINVAL);
9984         }
9985
9986         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
9987         if (!pipe_config)
9988                 return ERR_PTR(-ENOMEM);
9989
9990         drm_mode_copy(&pipe_config->adjusted_mode, mode);
9991         drm_mode_copy(&pipe_config->requested_mode, mode);
9992
9993         pipe_config->cpu_transcoder =
9994                 (enum transcoder) to_intel_crtc(crtc)->pipe;
9995         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9996
9997         /*
9998          * Sanitize sync polarity flags based on requested ones. If neither
9999          * positive nor negative polarity is requested, treat this as meaning
10000          * negative polarity.
10001          */
10002         if (!(pipe_config->adjusted_mode.flags &
10003               (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
10004                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
10005
10006         if (!(pipe_config->adjusted_mode.flags &
10007               (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
10008                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
10009
10010         /* Compute a starting value for pipe_config->pipe_bpp taking the source
10011          * plane pixel format and any sink constraints into account. Returns the
10012          * source plane bpp so that dithering can be selected on mismatches
10013          * after encoders and crtc also have had their say. */
10014         plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
10015                                               fb, pipe_config);
10016         if (plane_bpp < 0)
10017                 goto fail;
10018
10019         /*
10020          * Determine the real pipe dimensions. Note that stereo modes can
10021          * increase the actual pipe size due to the frame doubling and
10022          * insertion of additional space for blanks between the frames. This
10023          * is stored in the crtc timings. We use the requested mode to do this
10024          * computation to clearly distinguish it from the adjusted mode, which
10025          * can be changed by the connectors in the below retry loop.
10026          */
10027         drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
10028         pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
10029         pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
10030
10031 encoder_retry:
10032         /* Ensure the port clock defaults are reset when retrying. */
10033         pipe_config->port_clock = 0;
10034         pipe_config->pixel_multiplier = 1;
10035
10036         /* Fill in default crtc timings, allow encoders to overwrite them. */
10037         drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
10038
10039         /* Pass our mode to the connectors and the CRTC to give them a chance to
10040          * adjust it according to limitations or connector properties, and also
10041          * a chance to reject the mode entirely.
10042          */
10043         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10044                             base.head) {
10045
10046                 if (&encoder->new_crtc->base != crtc)
10047                         continue;
10048
10049                 if (!(encoder->compute_config(encoder, pipe_config))) {
10050                         DRM_DEBUG_KMS("Encoder config failure\n");
10051                         goto fail;
10052                 }
10053         }
10054
10055         /* Set default port clock if not overwritten by the encoder. Needs to be
10056          * done afterwards in case the encoder adjusts the mode. */
10057         if (!pipe_config->port_clock)
10058                 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
10059                         * pipe_config->pixel_multiplier;
10060
10061         ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
10062         if (ret < 0) {
10063                 DRM_DEBUG_KMS("CRTC fixup failed\n");
10064                 goto fail;
10065         }
10066
10067         if (ret == RETRY) {
10068                 if (WARN(!retry, "loop in pipe configuration computation\n")) {
10069                         ret = -EINVAL;
10070                         goto fail;
10071                 }
10072
10073                 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
10074                 retry = false;
10075                 goto encoder_retry;
10076         }
10077
10078         pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
10079         DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
10080                       plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
10081
10082         return pipe_config;
10083 fail:
10084         kfree(pipe_config);
10085         return ERR_PTR(ret);
10086 }
10087
10088 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
10089  * simplicity we use the crtc's pipe number (because it's easier to obtain). */
10090 static void
10091 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10092                              unsigned *prepare_pipes, unsigned *disable_pipes)
10093 {
10094         struct intel_crtc *intel_crtc;
10095         struct drm_device *dev = crtc->dev;
10096         struct intel_encoder *encoder;
10097         struct intel_connector *connector;
10098         struct drm_crtc *tmp_crtc;
10099
10100         *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
10101
10102         /* Check which crtcs have changed outputs connected to them, these need
10103          * to be part of the prepare_pipes mask. We don't (yet) support global
10104          * modeset across multiple crtcs, so modeset_pipes will only have one
10105          * bit set at most. */
10106         list_for_each_entry(connector, &dev->mode_config.connector_list,
10107                             base.head) {
10108                 if (connector->base.encoder == &connector->new_encoder->base)
10109                         continue;
10110
10111                 if (connector->base.encoder) {
10112                         tmp_crtc = connector->base.encoder->crtc;
10113
10114                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10115                 }
10116
10117                 if (connector->new_encoder)
10118                         *prepare_pipes |=
10119                                 1 << connector->new_encoder->new_crtc->pipe;
10120         }
10121
10122         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10123                             base.head) {
10124                 if (encoder->base.crtc == &encoder->new_crtc->base)
10125                         continue;
10126
10127                 if (encoder->base.crtc) {
10128                         tmp_crtc = encoder->base.crtc;
10129
10130                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
10131                 }
10132
10133                 if (encoder->new_crtc)
10134                         *prepare_pipes |= 1 << encoder->new_crtc->pipe;
10135         }
10136
10137         /* Check for pipes that will be enabled/disabled ... */
10138         for_each_intel_crtc(dev, intel_crtc) {
10139                 if (intel_crtc->base.enabled == intel_crtc->new_enabled)
10140                         continue;
10141
10142                 if (!intel_crtc->new_enabled)
10143                         *disable_pipes |= 1 << intel_crtc->pipe;
10144                 else
10145                         *prepare_pipes |= 1 << intel_crtc->pipe;
10146         }
10147
10148
10149         /* set_mode is also used to update properties on live display pipes. */
10150         intel_crtc = to_intel_crtc(crtc);
10151         if (intel_crtc->new_enabled)
10152                 *prepare_pipes |= 1 << intel_crtc->pipe;
10153
10154         /*
10155          * For simplicity do a full modeset on any pipe where the output routing
10156          * changed. We could be more clever, but that would require us to be
10157          * more careful with calling the relevant encoder->mode_set functions.
10158          */
10159         if (*prepare_pipes)
10160                 *modeset_pipes = *prepare_pipes;
10161
10162         /* ... and mask these out. */
10163         *modeset_pipes &= ~(*disable_pipes);
10164         *prepare_pipes &= ~(*disable_pipes);
10165
10166         /*
10167          * HACK: We don't (yet) fully support global modesets. intel_set_config
10168          * obeys this rule, but the modeset restore mode of
10169          * intel_modeset_setup_hw_state does not.
10170          */
10171         *modeset_pipes &= 1 << intel_crtc->pipe;
10172         *prepare_pipes &= 1 << intel_crtc->pipe;
10173
10174         DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
10175                       *modeset_pipes, *prepare_pipes, *disable_pipes);
10176 }
10177
10178 static bool intel_crtc_in_use(struct drm_crtc *crtc)
10179 {
10180         struct drm_encoder *encoder;
10181         struct drm_device *dev = crtc->dev;
10182
10183         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
10184                 if (encoder->crtc == crtc)
10185                         return true;
10186
10187         return false;
10188 }
10189
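/*
 * Update the software output state for all pipes in prepare_pipes: mark
 * the affected encoders as inactive, commit the staged
 * connector/encoder/crtc links and force the DPMS property of the touched
 * connectors back to ON.
 */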
10190 static void
10191 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10192 {
10193         struct intel_encoder *intel_encoder;
10194         struct intel_crtc *intel_crtc;
10195         struct drm_connector *connector;
10196
10197         list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
10198                             base.head) {
10199                 if (!intel_encoder->base.crtc)
10200                         continue;
10201
10202                 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
10203
10204                 if (prepare_pipes & (1 << intel_crtc->pipe))
10205                         intel_encoder->connectors_active = false;
10206         }
10207
10208         intel_modeset_commit_output_state(dev);
10209
10210         /* Double check state. */
10211         for_each_intel_crtc(dev, intel_crtc) {
10212                 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
10213                 WARN_ON(intel_crtc->new_config &&
10214                         intel_crtc->new_config != &intel_crtc->config);
10215                 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
10216         }
10217
10218         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10219                 if (!connector->encoder || !connector->encoder->crtc)
10220                         continue;
10221
10222                 intel_crtc = to_intel_crtc(connector->encoder->crtc);
10223
10224                 if (prepare_pipes & (1 << intel_crtc->pipe)) {
10225                         struct drm_property *dpms_property =
10226                                 dev->mode_config.dpms_property;
10227
10228                         connector->dpms = DRM_MODE_DPMS_ON;
10229                         drm_object_property_set_value(&connector->base,
10230                                                          dpms_property,
10231                                                          DRM_MODE_DPMS_ON);
10232
10233                         intel_encoder = to_intel_encoder(connector->encoder);
10234                         intel_encoder->connectors_active = true;
10235                 }
10236         }
10237
10238 }
10239
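/*
 * Fuzzy clock comparison. The integer check below is equivalent to
 * 20 * abs(clock1 - clock2) < clock1 + clock2, i.e. the two clocks must
 * agree to within roughly 10% of their average. For example 100000 vs
 * 109000 passes (diff 9000, limit (100000 + 109000) / 20 = 10450), while
 * 100000 vs 111000 does not.
 */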
10240 static bool intel_fuzzy_clock_check(int clock1, int clock2)
10241 {
10242         int diff;
10243
10244         if (clock1 == clock2)
10245                 return true;
10246
10247         if (!clock1 || !clock2)
10248                 return false;
10249
10250         diff = abs(clock1 - clock2);
10251
10252         if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
10253                 return true;
10254
10255         return false;
10256 }
10257
10258 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
10259         list_for_each_entry((intel_crtc), \
10260                             &(dev)->mode_config.crtc_list, \
10261                             base.head) \
10262                 if (mask & (1 <<(intel_crtc)->pipe))
10263
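/*
 * Compare the software-tracked pipe config against the one read back from
 * the hardware. Each PIPE_CONF_CHECK_* macro below logs a DRM_ERROR and
 * bails out with false on the first mismatching field; _X prints hex,
 * _FLAGS compares under a mask and _CLOCK_FUZZY tolerates small clock
 * differences.
 */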
10264 static bool
10265 intel_pipe_config_compare(struct drm_device *dev,
10266                           struct intel_crtc_config *current_config,
10267                           struct intel_crtc_config *pipe_config)
10268 {
10269 #define PIPE_CONF_CHECK_X(name) \
10270         if (current_config->name != pipe_config->name) { \
10271                 DRM_ERROR("mismatch in " #name " " \
10272                           "(expected 0x%08x, found 0x%08x)\n", \
10273                           current_config->name, \
10274                           pipe_config->name); \
10275                 return false; \
10276         }
10277
10278 #define PIPE_CONF_CHECK_I(name) \
10279         if (current_config->name != pipe_config->name) { \
10280                 DRM_ERROR("mismatch in " #name " " \
10281                           "(expected %i, found %i)\n", \
10282                           current_config->name, \
10283                           pipe_config->name); \
10284                 return false; \
10285         }
10286
10287 #define PIPE_CONF_CHECK_FLAGS(name, mask)       \
10288         if ((current_config->name ^ pipe_config->name) & (mask)) { \
10289                 DRM_ERROR("mismatch in " #name "(" #mask ") "      \
10290                           "(expected %i, found %i)\n", \
10291                           current_config->name & (mask), \
10292                           pipe_config->name & (mask)); \
10293                 return false; \
10294         }
10295
10296 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
10297         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
10298                 DRM_ERROR("mismatch in " #name " " \
10299                           "(expected %i, found %i)\n", \
10300                           current_config->name, \
10301                           pipe_config->name); \
10302                 return false; \
10303         }
10304
10305 #define PIPE_CONF_QUIRK(quirk)  \
10306         ((current_config->quirks | pipe_config->quirks) & (quirk))
10307
10308         PIPE_CONF_CHECK_I(cpu_transcoder);
10309
10310         PIPE_CONF_CHECK_I(has_pch_encoder);
10311         PIPE_CONF_CHECK_I(fdi_lanes);
10312         PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
10313         PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
10314         PIPE_CONF_CHECK_I(fdi_m_n.link_m);
10315         PIPE_CONF_CHECK_I(fdi_m_n.link_n);
10316         PIPE_CONF_CHECK_I(fdi_m_n.tu);
10317
10318         PIPE_CONF_CHECK_I(has_dp_encoder);
10319         PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10320         PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10321         PIPE_CONF_CHECK_I(dp_m_n.link_m);
10322         PIPE_CONF_CHECK_I(dp_m_n.link_n);
10323         PIPE_CONF_CHECK_I(dp_m_n.tu);
10324
10325         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10326         PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
10327         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10328         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
10329         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
10330         PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
10331
10332         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
10333         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
10334         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
10335         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
10336         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
10337         PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
10338
10339         PIPE_CONF_CHECK_I(pixel_multiplier);
10340         PIPE_CONF_CHECK_I(has_hdmi_sink);
10341         if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10342             IS_VALLEYVIEW(dev))
10343                 PIPE_CONF_CHECK_I(limited_color_range);
10344
10345         PIPE_CONF_CHECK_I(has_audio);
10346
10347         PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10348                               DRM_MODE_FLAG_INTERLACE);
10349
10350         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
10351                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10352                                       DRM_MODE_FLAG_PHSYNC);
10353                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10354                                       DRM_MODE_FLAG_NHSYNC);
10355                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10356                                       DRM_MODE_FLAG_PVSYNC);
10357                 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
10358                                       DRM_MODE_FLAG_NVSYNC);
10359         }
10360
10361         PIPE_CONF_CHECK_I(pipe_src_w);
10362         PIPE_CONF_CHECK_I(pipe_src_h);
10363
10364         /*
10365          * FIXME: BIOS likes to set up a cloned config with lvds+external
10366          * screen. Since we don't yet re-compute the pipe config when moving
10367          * just the lvds port away to another pipe the sw tracking won't match.
10368          *
10369          * Proper atomic modesets with recomputed global state will fix this.
10370          * Until then just don't check gmch state for inherited modes.
10371          */
10372         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
10373                 PIPE_CONF_CHECK_I(gmch_pfit.control);
10374                 /* pfit ratios are autocomputed by the hw on gen4+ */
10375                 if (INTEL_INFO(dev)->gen < 4)
10376                         PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
10377                 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
10378         }
10379
10380         PIPE_CONF_CHECK_I(pch_pfit.enabled);
10381         if (current_config->pch_pfit.enabled) {
10382                 PIPE_CONF_CHECK_I(pch_pfit.pos);
10383                 PIPE_CONF_CHECK_I(pch_pfit.size);
10384         }
10385
10386         /* BDW+ don't expose a synchronous way to read the state */
10387         if (IS_HASWELL(dev))
10388                 PIPE_CONF_CHECK_I(ips_enabled);
10389
10390         PIPE_CONF_CHECK_I(double_wide);
10391
10392         PIPE_CONF_CHECK_I(shared_dpll);
10393         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
10394         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
10395         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10396         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10397
10398         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10399                 PIPE_CONF_CHECK_I(pipe_bpp);
10400
10401         PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
10402         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
10403
10404 #undef PIPE_CONF_CHECK_X
10405 #undef PIPE_CONF_CHECK_I
10406 #undef PIPE_CONF_CHECK_FLAGS
10407 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
10408 #undef PIPE_CONF_QUIRK
10409
10410         return true;
10411 }
10412
10413 static void
10414 check_connector_state(struct drm_device *dev)
10415 {
10416         struct intel_connector *connector;
10417
10418         list_for_each_entry(connector, &dev->mode_config.connector_list,
10419                             base.head) {
10420                 /* This also checks the encoder/connector hw state with the
10421                  * ->get_hw_state callbacks. */
10422                 intel_connector_check_state(connector);
10423
10424                 WARN(&connector->new_encoder->base != connector->base.encoder,
10425                      "connector's staged encoder doesn't match current encoder\n");
10426         }
10427 }
10428
10429 static void
10430 check_encoder_state(struct drm_device *dev)
10431 {
10432         struct intel_encoder *encoder;
10433         struct intel_connector *connector;
10434
10435         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10436                             base.head) {
10437                 bool enabled = false;
10438                 bool active = false;
10439                 enum pipe pipe, tracked_pipe;
10440
10441                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
10442                               encoder->base.base.id,
10443                               encoder->base.name);
10444
10445                 WARN(&encoder->new_crtc->base != encoder->base.crtc,
10446                      "encoder's stage crtc doesn't match current crtc\n");
10447                 WARN(encoder->connectors_active && !encoder->base.crtc,
10448                      "encoder's active_connectors set, but no crtc\n");
10449
10450                 list_for_each_entry(connector, &dev->mode_config.connector_list,
10451                                     base.head) {
10452                         if (connector->base.encoder != &encoder->base)
10453                                 continue;
10454                         enabled = true;
10455                         if (connector->base.dpms != DRM_MODE_DPMS_OFF)
10456                                 active = true;
10457                 }
10458                 WARN(!!encoder->base.crtc != enabled,
10459                      "encoder's enabled state mismatch "
10460                      "(expected %i, found %i)\n",
10461                      !!encoder->base.crtc, enabled);
10462                 WARN(active && !encoder->base.crtc,
10463                      "active encoder with no crtc\n");
10464
10465                 WARN(encoder->connectors_active != active,
10466                      "encoder's computed active state doesn't match tracked active state "
10467                      "(expected %i, found %i)\n", active, encoder->connectors_active);
10468
10469                 active = encoder->get_hw_state(encoder, &pipe);
10470                 WARN(active != encoder->connectors_active,
10471                      "encoder's hw state doesn't match sw tracking "
10472                      "(expected %i, found %i)\n",
10473                      encoder->connectors_active, active);
10474
10475                 if (!encoder->base.crtc)
10476                         continue;
10477
10478                 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
10479                 WARN(active && pipe != tracked_pipe,
10480                      "active encoder's pipe doesn't match"
10481                      "(expected %i, found %i)\n",
10482                      tracked_pipe, pipe);
10483
10484         }
10485 }
10486
10487 static void
10488 check_crtc_state(struct drm_device *dev)
10489 {
10490         struct drm_i915_private *dev_priv = dev->dev_private;
10491         struct intel_crtc *crtc;
10492         struct intel_encoder *encoder;
10493         struct intel_crtc_config pipe_config;
10494
10495         for_each_intel_crtc(dev, crtc) {
10496                 bool enabled = false;
10497                 bool active = false;
10498
10499                 memset(&pipe_config, 0, sizeof(pipe_config));
10500
10501                 DRM_DEBUG_KMS("[CRTC:%d]\n",
10502                               crtc->base.base.id);
10503
10504                 WARN(crtc->active && !crtc->base.enabled,
10505                      "active crtc, but not enabled in sw tracking\n");
10506
10507                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10508                                     base.head) {
10509                         if (encoder->base.crtc != &crtc->base)
10510                                 continue;
10511                         enabled = true;
10512                         if (encoder->connectors_active)
10513                                 active = true;
10514                 }
10515
10516                 WARN(active != crtc->active,
10517                      "crtc's computed active state doesn't match tracked active state "
10518                      "(expected %i, found %i)\n", active, crtc->active);
10519                 WARN(enabled != crtc->base.enabled,
10520                      "crtc's computed enabled state doesn't match tracked enabled state "
10521                      "(expected %i, found %i)\n", enabled, crtc->base.enabled);
10522
10523                 active = dev_priv->display.get_pipe_config(crtc,
10524                                                            &pipe_config);
10525
10526                 /* hw state is inconsistent with the pipe A quirk */
10527                 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
10528                         active = crtc->active;
10529
10530                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10531                                     base.head) {
10532                         enum pipe pipe;
10533                         if (encoder->base.crtc != &crtc->base)
10534                                 continue;
10535                         if (encoder->get_hw_state(encoder, &pipe))
10536                                 encoder->get_config(encoder, &pipe_config);
10537                 }
10538
10539                 WARN(crtc->active != active,
10540                      "crtc active state doesn't match with hw state "
10541                      "(expected %i, found %i)\n", crtc->active, active);
10542
10543                 if (active &&
10544                     !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
10545                         WARN(1, "pipe state doesn't match!\n");
10546                         intel_dump_pipe_config(crtc, &pipe_config,
10547                                                "[hw state]");
10548                         intel_dump_pipe_config(crtc, &crtc->config,
10549                                                "[sw state]");
10550                 }
10551         }
10552 }
10553
10554 static void
10555 check_shared_dpll_state(struct drm_device *dev)
10556 {
10557         struct drm_i915_private *dev_priv = dev->dev_private;
10558         struct intel_crtc *crtc;
10559         struct intel_dpll_hw_state dpll_hw_state;
10560         int i;
10561
10562         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10563                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10564                 int enabled_crtcs = 0, active_crtcs = 0;
10565                 bool active;
10566
10567                 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
10568
10569                 DRM_DEBUG_KMS("%s\n", pll->name);
10570
10571                 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10572
10573                 WARN(pll->active > pll->refcount,
10574                      "more active pll users than references: %i vs %i\n",
10575                      pll->active, pll->refcount);
10576                 WARN(pll->active && !pll->on,
10577                      "pll in active use but not on in sw tracking\n");
10578                 WARN(pll->on && !pll->active,
10579                      "pll in on but not on in use in sw tracking\n");
10580                 WARN(pll->on != active,
10581                      "pll on state mismatch (expected %i, found %i)\n",
10582                      pll->on, active);
10583
10584                 for_each_intel_crtc(dev, crtc) {
10585                         if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
10586                                 enabled_crtcs++;
10587                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10588                                 active_crtcs++;
10589                 }
10590                 WARN(pll->active != active_crtcs,
10591                      "pll active crtcs mismatch (expected %i, found %i)\n",
10592                      pll->active, active_crtcs);
10593                 WARN(pll->refcount != enabled_crtcs,
10594                      "pll enabled crtcs mismatch (expected %i, found %i)\n",
10595                      pll->refcount, enabled_crtcs);
10596
10597                 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
10598                                        sizeof(dpll_hw_state)),
10599                      "pll hw state mismatch\n");
10600         }
10601 }
10602
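/*
 * Cross-check the software modeset state (connectors, encoders, crtcs and
 * shared DPLLs) against the state read back from the hardware. Called
 * after a successful mode set.
 */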
10603 void
10604 intel_modeset_check_state(struct drm_device *dev)
10605 {
10606         check_connector_state(dev);
10607         check_encoder_state(dev);
10608         check_crtc_state(dev);
10609         check_shared_dpll_state(dev);
10610 }
10611
10612 void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
10613                                      int dotclock)
10614 {
10615         /*
10616          * FDI already provided one idea for the dotclock.
10617          * Yell if the encoder disagrees.
10618          */
10619         WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
10620              "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
10621              pipe_config->adjusted_mode.crtc_clock, dotclock);
10622 }
10623
10624 static void update_scanline_offset(struct intel_crtc *crtc)
10625 {
10626         struct drm_device *dev = crtc->base.dev;
10627
10628         /*
10629          * The scanline counter increments at the leading edge of hsync.
10630          *
10631          * On most platforms it starts counting from vtotal-1 on the
10632          * first active line. That means the scanline counter value is
10633          * always one less than what we would expect. Ie. just after
10634          * start of vblank, which also occurs at start of hsync (on the
10635          * last active line), the scanline counter will read vblank_start-1.
10636          *
10637          * On gen2 the scanline counter starts counting from 1 instead
10638          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
10639          * to keep the value positive), instead of adding one.
10640          *
10641          * On HSW+ the behaviour of the scanline counter depends on the output
10642          * type. For DP ports it behaves like most other platforms, but on HDMI
10643          * there's an extra 1 line difference. So we need to add two instead of
10644          * one to the value.
10645          */
10646         if (IS_GEN2(dev)) {
10647                 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
10648                 int vtotal;
10649
10650                 vtotal = mode->crtc_vtotal;
10651                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
10652                         vtotal /= 2;
10653
10654                 crtc->scanline_offset = vtotal - 1;
10655         } else if (HAS_DDI(dev) &&
10656                    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
10657                 crtc->scanline_offset = 2;
10658         } else
10659                 crtc->scanline_offset = 1;
10660 }
10661
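/*
 * Core of intel_set_mode: compute the affected pipe masks and the new pipe
 * config, disable the pipes that change, commit the staged output state
 * and global resources, pin the new framebuffer and program the mode, then
 * re-enable the prepared pipes.
 */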
10662 static int __intel_set_mode(struct drm_crtc *crtc,
10663                             struct drm_display_mode *mode,
10664                             int x, int y, struct drm_framebuffer *fb)
10665 {
10666         struct drm_device *dev = crtc->dev;
10667         struct drm_i915_private *dev_priv = dev->dev_private;
10668         struct drm_display_mode *saved_mode;
10669         struct intel_crtc_config *pipe_config = NULL;
10670         struct intel_crtc *intel_crtc;
10671         unsigned disable_pipes, prepare_pipes, modeset_pipes;
10672         int ret = 0;
10673
10674         saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
10675         if (!saved_mode)
10676                 return -ENOMEM;
10677
10678         intel_modeset_affected_pipes(crtc, &modeset_pipes,
10679                                      &prepare_pipes, &disable_pipes);
10680
10681         *saved_mode = crtc->mode;
10682
10683         /* Hack: Because we don't (yet) support global modeset on multiple
10684          * crtcs, we don't keep track of the new mode for more than one crtc.
10685          * Hence simply check whether any bit is set in modeset_pipes in all the
10686          * pieces of code that are not yet converted to deal with multiple crtcs
10687          * changing their mode at the same time. */
10688         if (modeset_pipes) {
10689                 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
10690                 if (IS_ERR(pipe_config)) {
10691                         ret = PTR_ERR(pipe_config);
10692                         pipe_config = NULL;
10693
10694                         goto out;
10695                 }
10696                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
10697                                        "[modeset]");
10698                 to_intel_crtc(crtc)->new_config = pipe_config;
10699         }
10700
10701         /*
10702          * See if the config requires any additional preparation, e.g.
10703          * to adjust global state with pipes off.  We need to do this
10704          * here so we can get the modeset_pipe updated config for the new
10705          * mode set on this crtc.  For other crtcs we need to use the
10706          * adjusted_mode bits in the crtc directly.
10707          */
10708         if (IS_VALLEYVIEW(dev)) {
10709                 valleyview_modeset_global_pipes(dev, &prepare_pipes);
10710
10711                 /* may have added more to prepare_pipes than we should */
10712                 prepare_pipes &= ~disable_pipes;
10713         }
10714
10715         for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
10716                 intel_crtc_disable(&intel_crtc->base);
10717
10718         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10719                 if (intel_crtc->base.enabled)
10720                         dev_priv->display.crtc_disable(&intel_crtc->base);
10721         }
10722
10723         /* crtc->mode is already used by the ->mode_set callbacks, hence we need
10724          * to set it here already despite that we pass it down the callchain.
10725          */
10726         if (modeset_pipes) {
10727                 crtc->mode = *mode;
10728                 /* mode_set/enable/disable functions rely on a correct pipe
10729                  * config. */
10730                 to_intel_crtc(crtc)->config = *pipe_config;
10731                 to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
10732
10733                 /*
10734                  * Calculate and store various constants which
10735                  * are later needed by vblank and swap-completion
10736                  * timestamping. They are derived from true hwmode.
10737                  */
10738                 drm_calc_timestamping_constants(crtc,
10739                                                 &pipe_config->adjusted_mode);
10740         }
10741
10742         /* Only after disabling all output pipelines that will be changed can we
10743          * update the output configuration. */
10744         intel_modeset_update_state(dev, prepare_pipes);
10745
10746         if (dev_priv->display.modeset_global_resources)
10747                 dev_priv->display.modeset_global_resources(dev);
10748
10749         /* Set up the DPLL and any encoders state that needs to adjust or depend
10750          * on the DPLL.
10751          */
10752         for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
10753                 struct drm_framebuffer *old_fb;
10754                 struct drm_i915_gem_object *old_obj = NULL;
10755                 struct drm_i915_gem_object *obj =
10756                         to_intel_framebuffer(fb)->obj;
10757
10758                 mutex_lock(&dev->struct_mutex);
10759                 ret = intel_pin_and_fence_fb_obj(dev,
10760                                                  obj,
10761                                                  NULL);
10762                 if (ret != 0) {
10763                         DRM_ERROR("pin & fence failed\n");
10764                         mutex_unlock(&dev->struct_mutex);
10765                         goto done;
10766                 }
10767                 old_fb = crtc->primary->fb;
10768                 if (old_fb) {
10769                         old_obj = to_intel_framebuffer(old_fb)->obj;
10770                         intel_unpin_fb_obj(old_obj);
10771                 }
10772                 i915_gem_track_fb(old_obj, obj,
10773                                   INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
10774                 mutex_unlock(&dev->struct_mutex);
10775
10776                 crtc->primary->fb = fb;
10777                 crtc->x = x;
10778                 crtc->y = y;
10779
10780                 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
10781                                                       x, y, fb);
10782                 if (ret)
10783                         goto done;
10784         }
10785
10786         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
10787         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
10788                 update_scanline_offset(intel_crtc);
10789
10790                 dev_priv->display.crtc_enable(&intel_crtc->base);
10791         }
10792
10793         /* FIXME: add subpixel order */
10794 done:
10795         if (ret && crtc->enabled)
10796                 crtc->mode = *saved_mode;
10797
10798 out:
10799         kfree(pipe_config);
10800         kfree(saved_mode);
10801         return ret;
10802 }
10803
10804 static int intel_set_mode(struct drm_crtc *crtc,
10805                           struct drm_display_mode *mode,
10806                           int x, int y, struct drm_framebuffer *fb)
10807 {
10808         int ret;
10809
10810         ret = __intel_set_mode(crtc, mode, x, y, fb);
10811
10812         if (ret == 0)
10813                 intel_modeset_check_state(crtc->dev);
10814
10815         return ret;
10816 }
10817
10818 void intel_crtc_restore_mode(struct drm_crtc *crtc)
10819 {
10820         intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
10821 }
10822
10823 #undef for_each_intel_crtc_masked
10824
10825 static void intel_set_config_free(struct intel_set_config *config)
10826 {
10827         if (!config)
10828                 return;
10829
10830         kfree(config->save_connector_encoders);
10831         kfree(config->save_encoder_crtcs);
10832         kfree(config->save_crtc_enabled);
10833         kfree(config);
10834 }
10835
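/*
 * Snapshot the current crtc enabled bits and the encoder->crtc and
 * connector->encoder links so that the staged state can be restored with
 * intel_set_config_restore_state() if the modeset fails.
 */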
10836 static int intel_set_config_save_state(struct drm_device *dev,
10837                                        struct intel_set_config *config)
10838 {
10839         struct drm_crtc *crtc;
10840         struct drm_encoder *encoder;
10841         struct drm_connector *connector;
10842         int count;
10843
10844         config->save_crtc_enabled =
10845                 kcalloc(dev->mode_config.num_crtc,
10846                         sizeof(bool), GFP_KERNEL);
10847         if (!config->save_crtc_enabled)
10848                 return -ENOMEM;
10849
10850         config->save_encoder_crtcs =
10851                 kcalloc(dev->mode_config.num_encoder,
10852                         sizeof(struct drm_crtc *), GFP_KERNEL);
10853         if (!config->save_encoder_crtcs)
10854                 return -ENOMEM;
10855
10856         config->save_connector_encoders =
10857                 kcalloc(dev->mode_config.num_connector,
10858                         sizeof(struct drm_encoder *), GFP_KERNEL);
10859         if (!config->save_connector_encoders)
10860                 return -ENOMEM;
10861
10862         /* Copy data. Note that driver private data is not affected.
10863          * Should anything bad happen, only the expected state is
10864          * restored, not the driver's personal bookkeeping.
10865          */
10866         count = 0;
10867         for_each_crtc(dev, crtc) {
10868                 config->save_crtc_enabled[count++] = crtc->enabled;
10869         }
10870
10871         count = 0;
10872         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
10873                 config->save_encoder_crtcs[count++] = encoder->crtc;
10874         }
10875
10876         count = 0;
10877         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
10878                 config->save_connector_encoders[count++] = connector->encoder;
10879         }
10880
10881         return 0;
10882 }
10883
10884 static void intel_set_config_restore_state(struct drm_device *dev,
10885                                            struct intel_set_config *config)
10886 {
10887         struct intel_crtc *crtc;
10888         struct intel_encoder *encoder;
10889         struct intel_connector *connector;
10890         int count;
10891
10892         count = 0;
10893         for_each_intel_crtc(dev, crtc) {
10894                 crtc->new_enabled = config->save_crtc_enabled[count++];
10895
10896                 if (crtc->new_enabled)
10897                         crtc->new_config = &crtc->config;
10898                 else
10899                         crtc->new_config = NULL;
10900         }
10901
10902         count = 0;
10903         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
10904                 encoder->new_crtc =
10905                         to_intel_crtc(config->save_encoder_crtcs[count++]);
10906         }
10907
10908         count = 0;
10909         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
10910                 connector->new_encoder =
10911                         to_intel_encoder(config->save_connector_encoders[count++]);
10912         }
10913 }
10914
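/*
 * Returns true if any connector in the requested set drives set->crtc but
 * currently has its DPMS state set to something other than ON.
 */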
10915 static bool
10916 is_crtc_connector_off(struct drm_mode_set *set)
10917 {
10918         int i;
10919
10920         if (set->num_connectors == 0)
10921                 return false;
10922
10923         if (WARN_ON(set->connectors == NULL))
10924                 return false;
10925
10926         for (i = 0; i < set->num_connectors; i++)
10927                 if (set->connectors[i]->encoder &&
10928                     set->connectors[i]->encoder->crtc == set->crtc &&
10929                     set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
10930                         return true;
10931
10932         return false;
10933 }
10934
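/*
 * Decide whether the set_config request needs a full mode set
 * (mode_changed) or whether flipping/moving the primary plane is enough
 * (fb_changed), based on DPMS state, framebuffer changes and mode
 * differences.
 */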
10935 static void
10936 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
10937                                       struct intel_set_config *config)
10938 {
10939
10940         /* We should be able to check here if the fb has the same properties
10941          * and then just flip_or_move it */
10942         if (is_crtc_connector_off(set)) {
10943                 config->mode_changed = true;
10944         } else if (set->crtc->primary->fb != set->fb) {
10945                 /*
10946                  * If we have no fb, we can only flip as long as the crtc is
10947                  * active, otherwise we need a full mode set.  The crtc may
10948                  * be active if we've only disabled the primary plane, or
10949                  * in fastboot situations.
10950                  */
10951                 if (set->crtc->primary->fb == NULL) {
10952                         struct intel_crtc *intel_crtc =
10953                                 to_intel_crtc(set->crtc);
10954
10955                         if (intel_crtc->active) {
10956                                 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
10957                                 config->fb_changed = true;
10958                         } else {
10959                                 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
10960                                 config->mode_changed = true;
10961                         }
10962                 } else if (set->fb == NULL) {
10963                         config->mode_changed = true;
10964                 } else if (set->fb->pixel_format !=
10965                            set->crtc->primary->fb->pixel_format) {
10966                         config->mode_changed = true;
10967                 } else {
10968                         config->fb_changed = true;
10969                 }
10970         }
10971
10972         if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
10973                 config->fb_changed = true;
10974
10975         if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
10976                 DRM_DEBUG_KMS("modes are different, full mode set\n");
10977                 drm_mode_debug_printmodeline(&set->crtc->mode);
10978                 drm_mode_debug_printmodeline(set->mode);
10979                 config->mode_changed = true;
10980         }
10981
10982         DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
10983                         set->crtc->base.id, config->mode_changed, config->fb_changed);
10984 }
10985
10986 static int
10987 intel_modeset_stage_output_state(struct drm_device *dev,
10988                                  struct drm_mode_set *set,
10989                                  struct intel_set_config *config)
10990 {
10991         struct intel_connector *connector;
10992         struct intel_encoder *encoder;
10993         struct intel_crtc *crtc;
10994         int ro;
10995
10996         /* The upper layers ensure that we either disable a crtc or have a list
10997          * of connectors. For paranoia, double-check this. */
10998         WARN_ON(!set->fb && (set->num_connectors != 0));
10999         WARN_ON(set->fb && (set->num_connectors == 0));
11000
11001         list_for_each_entry(connector, &dev->mode_config.connector_list,
11002                             base.head) {
11003                 /* Otherwise traverse passed in connector list and get encoders
11004                  * for them. */
11005                 for (ro = 0; ro < set->num_connectors; ro++) {
11006                         if (set->connectors[ro] == &connector->base) {
11007                                 connector->new_encoder = connector->encoder;
11008                                 break;
11009                         }
11010                 }
11011
11012                 /* If we disable the crtc, disable all its connectors. Also, if
11013                  * the connector is on the changing crtc but not on the new
11014                  * connector list, disable it. */
11015                 if ((!set->fb || ro == set->num_connectors) &&
11016                     connector->base.encoder &&
11017                     connector->base.encoder->crtc == set->crtc) {
11018                         connector->new_encoder = NULL;
11019
11020                         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
11021                                 connector->base.base.id,
11022                                 connector->base.name);
11023                 }
11024
11025
11026                 if (&connector->new_encoder->base != connector->base.encoder) {
11027                         DRM_DEBUG_KMS("encoder changed, full mode switch\n");
11028                         config->mode_changed = true;
11029                 }
11030         }
11031         /* connector->new_encoder is now updated for all connectors. */
11032
11033         /* Update crtc of enabled connectors. */
11034         list_for_each_entry(connector, &dev->mode_config.connector_list,
11035                             base.head) {
11036                 struct drm_crtc *new_crtc;
11037
11038                 if (!connector->new_encoder)
11039                         continue;
11040
11041                 new_crtc = connector->new_encoder->base.crtc;
11042
11043                 for (ro = 0; ro < set->num_connectors; ro++) {
11044                         if (set->connectors[ro] == &connector->base)
11045                                 new_crtc = set->crtc;
11046                 }
11047
11048                 /* Make sure the new CRTC will work with the encoder */
11049                 if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
11050                                          new_crtc)) {
11051                         return -EINVAL;
11052                 }
11053                 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
11054
11055                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
11056                         connector->base.base.id,
11057                         connector->base.name,
11058                         new_crtc->base.id);
11059         }
11060
11061         /* Check for any encoders that need to be disabled. */
11062         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
11063                             base.head) {
11064                 int num_connectors = 0;
11065                 list_for_each_entry(connector,
11066                                     &dev->mode_config.connector_list,
11067                                     base.head) {
11068                         if (connector->new_encoder == encoder) {
11069                                 WARN_ON(!connector->new_encoder->new_crtc);
11070                                 num_connectors++;
11071                         }
11072                 }
11073
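                      /*
                       * An encoder that lost all of its connectors gets disabled;
                       * driving more than one connector from a single encoder
                       * (hardware cloning) is not supported by this path.
                       */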
11074                 if (num_connectors == 0)
11075                         encoder->new_crtc = NULL;
11076                 else if (num_connectors > 1)
11077                         return -EINVAL;
11078
11079                 /* Only now check for crtc changes so we don't miss encoders
11080                  * that will be disabled. */
11081                 if (&encoder->new_crtc->base != encoder->base.crtc) {
11082                         DRM_DEBUG_KMS("crtc changed, full mode switch\n");
11083                         config->mode_changed = true;
11084                 }
11085         }
11086         /* Now we've also updated encoder->new_crtc for all encoders. */
11087
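              /*
               * Finally derive the new enable state for each CRTC: it stays
               * enabled only if at least one encoder is still routed to it.
               */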
11088         for_each_intel_crtc(dev, crtc) {
11089                 crtc->new_enabled = false;
11090
11091                 list_for_each_entry(encoder,
11092                                     &dev->mode_config.encoder_list,
11093                                     base.head) {
11094                         if (encoder->new_crtc == crtc) {
11095                                 crtc->new_enabled = true;
11096                                 break;
11097                         }
11098                 }
11099
11100                 if (crtc->new_enabled != crtc->base.enabled) {
11101                         DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
11102                                       crtc->new_enabled ? "en" : "dis");
11103                         config->mode_changed = true;
11104                 }
11105
11106                 if (crtc->new_enabled)
11107                         crtc->new_config = &crtc->config;
11108                 else
11109                         crtc->new_config = NULL;
11110         }
11111
11112         return 0;
11113 }
11114
11115 static void disable_crtc_nofb(struct intel_crtc *crtc)
11116 {
11117         struct drm_device *dev = crtc->base.dev;
11118         struct intel_encoder *encoder;
11119         struct intel_connector *connector;
11120
11121         DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
11122                       pipe_name(crtc->pipe));
11123
11124         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
11125                 if (connector->new_encoder &&
11126                     connector->new_encoder->new_crtc == crtc)
11127                         connector->new_encoder = NULL;
11128         }
11129
11130         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11131                 if (encoder->new_crtc == crtc)
11132                         encoder->new_crtc = NULL;
11133         }
11134
11135         crtc->new_enabled = false;
11136         crtc->new_config = NULL;
11137 }
11138
11139 static int intel_crtc_set_config(struct drm_mode_set *set)
11140 {
11141         struct drm_device *dev;
11142         struct drm_mode_set save_set;
11143         struct intel_set_config *config;
11144         int ret;
11145
11146         BUG_ON(!set);
11147         BUG_ON(!set->crtc);
11148         BUG_ON(!set->crtc->helper_private);
11149
11150         /* Enforce sane interface api - has been abused by the fb helper. */
11151         BUG_ON(!set->mode && set->fb);
11152         BUG_ON(set->fb && set->num_connectors == 0);
11153
11154         if (set->fb) {
11155                 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
11156                                 set->crtc->base.id, set->fb->base.id,
11157                                 (int)set->num_connectors, set->x, set->y);
11158         } else {
11159                 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
11160         }
11161
11162         dev = set->crtc->dev;
11163
11164         ret = -ENOMEM;
11165         config = kzalloc(sizeof(*config), GFP_KERNEL);
11166         if (!config)
11167                 goto out_config;
11168
11169         ret = intel_set_config_save_state(dev, config);
11170         if (ret)
11171                 goto out_config;
11172
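              /* Stash the current mode, offsets and fb so they can be restored if the update fails. */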
11173         save_set.crtc = set->crtc;
11174         save_set.mode = &set->crtc->mode;
11175         save_set.x = set->crtc->x;
11176         save_set.y = set->crtc->y;
11177         save_set.fb = set->crtc->primary->fb;
11178
11179         /* Compute whether we need a full modeset, only an fb base update or no
11180          * change at all. In the future we might also check whether only the
11181          * mode changed, e.g. for LVDS where we only change the panel fitter in
11182          * such cases. */
11183         intel_set_config_compute_mode_changes(set, config);
11184
11185         ret = intel_modeset_stage_output_state(dev, set, config);
11186         if (ret)
11187                 goto fail;
11188
11189         if (config->mode_changed) {
11190                 ret = intel_set_mode(set->crtc, set->mode,
11191                                      set->x, set->y, set->fb);
11192         } else if (config->fb_changed) {
11193                 struct drm_i915_private *dev_priv = dev->dev_private;
11194                 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11195
11196                 intel_crtc_wait_for_pending_flips(set->crtc);
11197
11198                 ret = intel_pipe_set_base(set->crtc,
11199                                           set->x, set->y, set->fb);
11200
11201                 /*
11202                  * We need to make sure the primary plane is re-enabled if it
11203                  * has previously been turned off.
11204                  */
11205                 if (!intel_crtc->primary_enabled && ret == 0) {
11206                         WARN_ON(!intel_crtc->active);
11207                         intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11208                                                       intel_crtc->pipe);
11209                 }
11210
11211                 /*
11212                  * In the fastboot case this may be our only check of the
11213                  * state after boot.  It would be better to only do it on
11214                  * the first update, but we don't have a nice way of doing that
11215                  * (and really, set_config isn't used much for high freq page
11216                  * flipping, so increasing its cost here shouldn't be a big
11217                  * deal).
11218                  */
11219                 if (i915.fastboot && ret == 0)
11220                         intel_modeset_check_state(set->crtc->dev);
11221         }
11222
11223         if (ret) {
11224                 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
11225                               set->crtc->base.id, ret);
11226 fail:
11227                 intel_set_config_restore_state(dev, config);
11228
11229                 /*
11230                  * HACK: if the pipe was on, but we didn't have a framebuffer,
11231                  * force the pipe off to avoid oopsing in the modeset code
11232                  * due to fb==NULL. This should only happen during boot since
11233                  * we don't yet reconstruct the FB from the hardware state.
11234                  */
11235                 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
11236                         disable_crtc_nofb(to_intel_crtc(save_set.crtc));
11237
11238                 /* Try to restore the config */
11239                 if (config->mode_changed &&
11240                     intel_set_mode(save_set.crtc, save_set.mode,
11241                                    save_set.x, save_set.y, save_set.fb))
11242                         DRM_ERROR("failed to restore config after modeset failure\n");
11243         }
11244
11245 out_config:
11246         intel_set_config_free(config);
11247         return ret;
11248 }
11249
11250 static const struct drm_crtc_funcs intel_crtc_funcs = {
11251         .gamma_set = intel_crtc_gamma_set,
11252         .set_config = intel_crtc_set_config,
11253         .destroy = intel_crtc_destroy,
11254         .page_flip = intel_crtc_page_flip,
11255 };
11256
11257 static void intel_cpu_pll_init(struct drm_device *dev)
11258 {
11259         if (HAS_DDI(dev))
11260                 intel_ddi_pll_init(dev);
11261 }
11262
11263 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11264                                       struct intel_shared_dpll *pll,
11265                                       struct intel_dpll_hw_state *hw_state)
11266 {
11267         uint32_t val;
11268
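              /*
               * Read back the DPLL control and both divisor registers into the
               * caller's hw_state; the PLL is considered enabled when its VCO
               * enable bit is set.
               */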
11269         val = I915_READ(PCH_DPLL(pll->id));
11270         hw_state->dpll = val;
11271         hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
11272         hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
11273
11274         return val & DPLL_VCO_ENABLE;
11275 }
11276
11277 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
11278                                   struct intel_shared_dpll *pll)
11279 {
11280         I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
11281         I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
11282 }
11283
11284 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11285                                 struct intel_shared_dpll *pll)
11286 {
11287         /* PCH refclock must be enabled first */
11288         ibx_assert_pch_refclk_enabled(dev_priv);
11289
11290         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
11291
11292         /* Wait for the clocks to stabilize. */
11293         POSTING_READ(PCH_DPLL(pll->id));
11294         udelay(150);
11295
11296         /* The pixel multiplier can only be updated once the
11297          * DPLL is enabled and the clocks are stable.
11298          *
11299          * So write it again.
11300          */
11301         I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
11302         POSTING_READ(PCH_DPLL(pll->id));
11303         udelay(200);
11304 }
11305
11306 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
11307                                  struct intel_shared_dpll *pll)
11308 {
11309         struct drm_device *dev = dev_priv->dev;
11310         struct intel_crtc *crtc;
11311
11312         /* Make sure no transcoder is still depending on us. */
11313         for_each_intel_crtc(dev, crtc) {
11314                 if (intel_crtc_to_shared_dpll(crtc) == pll)
11315                         assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
11316         }
11317
11318         I915_WRITE(PCH_DPLL(pll->id), 0);
11319         POSTING_READ(PCH_DPLL(pll->id));
11320         udelay(200);
11321 }
11322
11323 static char *ibx_pch_dpll_names[] = {
11324         "PCH DPLL A",
11325         "PCH DPLL B",
11326 };
11327
11328 static void ibx_pch_dpll_init(struct drm_device *dev)
11329 {
11330         struct drm_i915_private *dev_priv = dev->dev_private;
11331         int i;
11332
11333         dev_priv->num_shared_dpll = 2;
11334
11335         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
11336                 dev_priv->shared_dplls[i].id = i;
11337                 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
11338                 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
11339                 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
11340                 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
11341                 dev_priv->shared_dplls[i].get_hw_state =
11342                         ibx_pch_dpll_get_hw_state;
11343         }
11344 }
11345
11346 static void intel_shared_dpll_init(struct drm_device *dev)
11347 {
11348         struct drm_i915_private *dev_priv = dev->dev_private;
11349
11350         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
11351                 ibx_pch_dpll_init(dev);
11352         else
11353                 dev_priv->num_shared_dpll = 0;
11354
11355         BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
11356 }
11357
11358 static int
11359 intel_primary_plane_disable(struct drm_plane *plane)
11360 {
11361         struct drm_device *dev = plane->dev;
11362         struct drm_i915_private *dev_priv = dev->dev_private;
11363         struct intel_plane *intel_plane = to_intel_plane(plane);
11364         struct intel_crtc *intel_crtc;
11365
11366         if (!plane->fb)
11367                 return 0;
11368
11369         BUG_ON(!plane->crtc);
11370
11371         intel_crtc = to_intel_crtc(plane->crtc);
11372
11373         /*
11374          * Even though we checked plane->fb above, it's still possible that
11375          * the primary plane has been implicitly disabled because the crtc
11376          * coordinates given weren't visible, or because we detected
11377          * that it was 100% covered by a sprite plane.  Or, the CRTC may be
11378          * off and we've set a fb, but haven't actually turned on the CRTC yet.
11379          * In either case, we need to unpin the FB and let the fb pointer get
11380          * updated, but otherwise we don't need to touch the hardware.
11381          */
11382         if (!intel_crtc->primary_enabled)
11383                 goto disable_unpin;
11384
11385         intel_crtc_wait_for_pending_flips(plane->crtc);
11386         intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
11387                                        intel_plane->pipe);
11388 disable_unpin:
11389         i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL,
11390                           INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11391         intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
11392         plane->fb = NULL;
11393
11394         return 0;
11395 }
11396
11397 static int
11398 intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11399                              struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11400                              unsigned int crtc_w, unsigned int crtc_h,
11401                              uint32_t src_x, uint32_t src_y,
11402                              uint32_t src_w, uint32_t src_h)
11403 {
11404         struct drm_device *dev = crtc->dev;
11405         struct drm_i915_private *dev_priv = dev->dev_private;
11406         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11407         struct intel_plane *intel_plane = to_intel_plane(plane);
11408         struct drm_i915_gem_object *obj, *old_obj = NULL;
11409         struct drm_rect dest = {
11410                 /* integer pixels */
11411                 .x1 = crtc_x,
11412                 .y1 = crtc_y,
11413                 .x2 = crtc_x + crtc_w,
11414                 .y2 = crtc_y + crtc_h,
11415         };
11416         struct drm_rect src = {
11417                 /* 16.16 fixed point */
11418                 .x1 = src_x,
11419                 .y1 = src_y,
11420                 .x2 = src_x + src_w,
11421                 .y2 = src_y + src_h,
11422         };
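              /*
               * Clip to the pipe source size while the CRTC is active; an
               * inactive CRTC clips everything away, so the plane ends up
               * marked as not visible.
               */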
11423         const struct drm_rect clip = {
11424                 /* integer pixels */
11425                 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11426                 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11427         };
11428         bool visible;
11429         int ret;
11430
11431         ret = drm_plane_helper_check_update(plane, crtc, fb,
11432                                             &src, &dest, &clip,
11433                                             DRM_PLANE_HELPER_NO_SCALING,
11434                                             DRM_PLANE_HELPER_NO_SCALING,
11435                                             false, true, &visible);
11436
11437         if (ret)
11438                 return ret;
11439
11440         if (plane->fb)
11441                 old_obj = to_intel_framebuffer(plane->fb)->obj;
11442         obj = to_intel_framebuffer(fb)->obj;
11443
11444         /*
11445          * If the CRTC isn't enabled, we're just pinning the framebuffer,
11446          * updating the fb pointer, and returning without touching the
11447          * hardware.  This allows us to later do a drmModeSetCrtc with fb=-1 to
11448          * turn on the display with all planes setup as desired.
11449          */
11450         if (!crtc->enabled) {
11451                 /*
11452                  * If we already called setplane while the crtc was disabled,
11453                  * we may have an fb pinned; unpin it.
11454                  */
11455                 if (plane->fb)
11456                         intel_unpin_fb_obj(old_obj);
11457
11458                 i915_gem_track_fb(old_obj, obj,
11459                                   INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11460
11461                 /* Pin and return without programming hardware */
11462                 return intel_pin_and_fence_fb_obj(dev, obj, NULL);
11463         }
11464
11465         intel_crtc_wait_for_pending_flips(crtc);
11466
11467         /*
11468          * If clipping results in a non-visible primary plane, we'll disable
11469          * the primary plane.  Note that this is a bit different than what
11470          * happens if userspace explicitly disables the plane by passing fb=0
11471          * because plane->fb still gets set and pinned.
11472          */
11473         if (!visible) {
11474                 /*
11475                  * Try to pin the new fb first so that we can bail out if we
11476                  * fail.
11477                  */
11478                 if (plane->fb != fb) {
11479                         ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11480                         if (ret)
11481                                 return ret;
11482                 }
11483
11484                 i915_gem_track_fb(old_obj, obj,
11485                                   INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11486
11487                 if (intel_crtc->primary_enabled)
11488                         intel_disable_primary_hw_plane(dev_priv,
11489                                                        intel_plane->plane,
11490                                                        intel_plane->pipe);
11491
11493                 if (plane->fb != fb)
11494                         if (plane->fb)
11495                                 intel_unpin_fb_obj(old_obj);
11496
11497                 return 0;
11498         }
11499
11500         ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11501         if (ret)
11502                 return ret;
11503
11504         if (!intel_crtc->primary_enabled)
11505                 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
11506                                               intel_crtc->pipe);
11507
11508         return 0;
11509 }
11510
11511 /* Common destruction function for both primary and cursor planes */
11512 static void intel_plane_destroy(struct drm_plane *plane)
11513 {
11514         struct intel_plane *intel_plane = to_intel_plane(plane);
11515         drm_plane_cleanup(plane);
11516         kfree(intel_plane);
11517 }
11518
11519 static const struct drm_plane_funcs intel_primary_plane_funcs = {
11520         .update_plane = intel_primary_plane_setplane,
11521         .disable_plane = intel_primary_plane_disable,
11522         .destroy = intel_plane_destroy,
11523 };
11524
11525 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11526                                                     int pipe)
11527 {
11528         struct intel_plane *primary;
11529         const uint32_t *intel_primary_formats;
11530         int num_formats;
11531
11532         primary = kzalloc(sizeof(*primary), GFP_KERNEL);
11533         if (primary == NULL)
11534                 return NULL;
11535
11536         primary->can_scale = false;
11537         primary->max_downscale = 1;
11538         primary->pipe = pipe;
11539         primary->plane = pipe;
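              /*
               * Mirror the pipe/plane swap done for FBC on gen2/3 in
               * intel_crtc_init(): plane A is kept feeding pipe B there.
               */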
11540         if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11541                 primary->plane = !pipe;
11542
11543         if (INTEL_INFO(dev)->gen <= 3) {
11544                 intel_primary_formats = intel_primary_formats_gen2;
11545                 num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
11546         } else {
11547                 intel_primary_formats = intel_primary_formats_gen4;
11548                 num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
11549         }
11550
11551         drm_universal_plane_init(dev, &primary->base, 0,
11552                                  &intel_primary_plane_funcs,
11553                                  intel_primary_formats, num_formats,
11554                                  DRM_PLANE_TYPE_PRIMARY);
11555         return &primary->base;
11556 }
11557
11558 static int
11559 intel_cursor_plane_disable(struct drm_plane *plane)
11560 {
11561         if (!plane->fb)
11562                 return 0;
11563
11564         BUG_ON(!plane->crtc);
11565
11566         return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
11567 }
11568
11569 static int
11570 intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11571                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11572                           unsigned int crtc_w, unsigned int crtc_h,
11573                           uint32_t src_x, uint32_t src_y,
11574                           uint32_t src_w, uint32_t src_h)
11575 {
11576         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11577         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11578         struct drm_i915_gem_object *obj = intel_fb->obj;
11579         struct drm_rect dest = {
11580                 /* integer pixels */
11581                 .x1 = crtc_x,
11582                 .y1 = crtc_y,
11583                 .x2 = crtc_x + crtc_w,
11584                 .y2 = crtc_y + crtc_h,
11585         };
11586         struct drm_rect src = {
11587                 /* 16.16 fixed point */
11588                 .x1 = src_x,
11589                 .y1 = src_y,
11590                 .x2 = src_x + src_w,
11591                 .y2 = src_y + src_h,
11592         };
11593         const struct drm_rect clip = {
11594                 /* integer pixels */
11595                 .x2 = intel_crtc->config.pipe_src_w,
11596                 .y2 = intel_crtc->config.pipe_src_h,
11597         };
11598         bool visible;
11599         int ret;
11600
11601         ret = drm_plane_helper_check_update(plane, crtc, fb,
11602                                             &src, &dest, &clip,
11603                                             DRM_PLANE_HELPER_NO_SCALING,
11604                                             DRM_PLANE_HELPER_NO_SCALING,
11605                                             true, true, &visible);
11606         if (ret)
11607                 return ret;
11608
11609         crtc->cursor_x = crtc_x;
11610         crtc->cursor_y = crtc_y;
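              /*
               * A new cursor buffer goes through the full set path; if only the
               * position or visibility changed, just reprogram the cursor.
               */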
11611         if (fb != crtc->cursor->fb) {
11612                 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11613         } else {
11614                 intel_crtc_update_cursor(crtc, visible);
11615                 return 0;
11616         }
11617 }
11618 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
11619         .update_plane = intel_cursor_plane_update,
11620         .disable_plane = intel_cursor_plane_disable,
11621         .destroy = intel_plane_destroy,
11622 };
11623
11624 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11625                                                    int pipe)
11626 {
11627         struct intel_plane *cursor;
11628
11629         cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
11630         if (cursor == NULL)
11631                 return NULL;
11632
11633         cursor->can_scale = false;
11634         cursor->max_downscale = 1;
11635         cursor->pipe = pipe;
11636         cursor->plane = pipe;
11637
11638         drm_universal_plane_init(dev, &cursor->base, 0,
11639                                  &intel_cursor_plane_funcs,
11640                                  intel_cursor_formats,
11641                                  ARRAY_SIZE(intel_cursor_formats),
11642                                  DRM_PLANE_TYPE_CURSOR);
11643         return &cursor->base;
11644 }
11645
11646 static void intel_crtc_init(struct drm_device *dev, int pipe)
11647 {
11648         struct drm_i915_private *dev_priv = dev->dev_private;
11649         struct intel_crtc *intel_crtc;
11650         struct drm_plane *primary = NULL;
11651         struct drm_plane *cursor = NULL;
11652         int i, ret;
11653
11654         intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
11655         if (intel_crtc == NULL)
11656                 return;
11657
11658         primary = intel_primary_plane_create(dev, pipe);
11659         if (!primary)
11660                 goto fail;
11661
11662         cursor = intel_cursor_plane_create(dev, pipe);
11663         if (!cursor)
11664                 goto fail;
11665
11666         ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
11667                                         cursor, &intel_crtc_funcs);
11668         if (ret)
11669                 goto fail;
11670
11671         drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
11672         for (i = 0; i < 256; i++) {
11673                 intel_crtc->lut_r[i] = i;
11674                 intel_crtc->lut_g[i] = i;
11675                 intel_crtc->lut_b[i] = i;
11676         }
11677
11678         /*
11679          * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS port
11680          * are hooked to pipe B. Hence we want plane A feeding pipe B.
11681          */
11682         intel_crtc->pipe = pipe;
11683         intel_crtc->plane = pipe;
11684         if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
11685                 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
11686                 intel_crtc->plane = !pipe;
11687         }
11688
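              /*
               * Start from values no real register write will match, so that the
               * first cursor update always programs the hardware.
               */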
11689         intel_crtc->cursor_base = ~0;
11690         intel_crtc->cursor_cntl = ~0;
11691
11692         init_waitqueue_head(&intel_crtc->vbl_wait);
11693
11694         BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
11695                dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
11696         dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
11697         dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
11698
11699         drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
11700
11701         WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
11702         return;
11703
11704 fail:
11705         if (primary)
11706                 drm_plane_cleanup(primary);
11707         if (cursor)
11708                 drm_plane_cleanup(cursor);
11709         kfree(intel_crtc);
11710 }
11711
11712 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
11713 {
11714         struct drm_encoder *encoder = connector->base.encoder;
11715         struct drm_device *dev = connector->base.dev;
11716
11717         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
11718
11719         if (!encoder)
11720                 return INVALID_PIPE;
11721
11722         return to_intel_crtc(encoder->crtc)->pipe;
11723 }
11724
11725 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
11726                                 struct drm_file *file)
11727 {
11728         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11729         struct drm_mode_object *drmmode_obj;
11730         struct intel_crtc *crtc;
11731
11732         if (!drm_core_check_feature(dev, DRIVER_MODESET))
11733                 return -ENODEV;
11734
11735         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
11736                         DRM_MODE_OBJECT_CRTC);
11737
11738         if (!drmmode_obj) {
11739                 DRM_ERROR("no such CRTC id\n");
11740                 return -ENOENT;
11741         }
11742
11743         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
11744         pipe_from_crtc_id->pipe = crtc->pipe;
11745
11746         return 0;
11747 }
11748
11749 static int intel_encoder_clones(struct intel_encoder *encoder)
11750 {
11751         struct drm_device *dev = encoder->base.dev;
11752         struct intel_encoder *source_encoder;
11753         int index_mask = 0;
11754         int entry = 0;
11755
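              /*
               * Build a bitmask with one bit per encoder in the mode_config list;
               * a set bit means that encoder can be cloned with this one.
               */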
11756         list_for_each_entry(source_encoder,
11757                             &dev->mode_config.encoder_list, base.head) {
11758                 if (encoders_cloneable(encoder, source_encoder))
11759                         index_mask |= (1 << entry);
11760
11761                 entry++;
11762         }
11763
11764         return index_mask;
11765 }
11766
11767 static bool has_edp_a(struct drm_device *dev)
11768 {
11769         struct drm_i915_private *dev_priv = dev->dev_private;
11770
11771         if (!IS_MOBILE(dev))
11772                 return false;
11773
11774         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
11775                 return false;
11776
11777         if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
11778                 return false;
11779
11780         return true;
11781 }
11782
11783 const char *intel_output_name(int output)
11784 {
11785         static const char *names[] = {
11786                 [INTEL_OUTPUT_UNUSED] = "Unused",
11787                 [INTEL_OUTPUT_ANALOG] = "Analog",
11788                 [INTEL_OUTPUT_DVO] = "DVO",
11789                 [INTEL_OUTPUT_SDVO] = "SDVO",
11790                 [INTEL_OUTPUT_LVDS] = "LVDS",
11791                 [INTEL_OUTPUT_TVOUT] = "TV",
11792                 [INTEL_OUTPUT_HDMI] = "HDMI",
11793                 [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
11794                 [INTEL_OUTPUT_EDP] = "eDP",
11795                 [INTEL_OUTPUT_DSI] = "DSI",
11796                 [INTEL_OUTPUT_UNKNOWN] = "Unknown",
11797         };
11798
11799         if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
11800                 return "Invalid";
11801
11802         return names[output];
11803 }
11804
11805 static bool intel_crt_present(struct drm_device *dev)
11806 {
11807         struct drm_i915_private *dev_priv = dev->dev_private;
11808
11809         if (IS_ULT(dev))
11810                 return false;
11811
11812         if (IS_CHERRYVIEW(dev))
11813                 return false;
11814
11815         if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
11816                 return false;
11817
11818         return true;
11819 }
11820
11821 static void intel_setup_outputs(struct drm_device *dev)
11822 {
11823         struct drm_i915_private *dev_priv = dev->dev_private;
11824         struct intel_encoder *encoder;
11825         bool dpd_is_edp = false;
11826
11827         intel_lvds_init(dev);
11828
11829         if (intel_crt_present(dev))
11830                 intel_crt_init(dev);
11831
11832         if (HAS_DDI(dev)) {
11833                 int found;
11834
11835                 /* Haswell uses DDI functions to detect digital outputs */
11836                 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
11837                 /* DDI A only supports eDP */
11838                 if (found)
11839                         intel_ddi_init(dev, PORT_A);
11840
11841                 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
11842                  * register */
11843                 found = I915_READ(SFUSE_STRAP);
11844
11845                 if (found & SFUSE_STRAP_DDIB_DETECTED)
11846                         intel_ddi_init(dev, PORT_B);
11847                 if (found & SFUSE_STRAP_DDIC_DETECTED)
11848                         intel_ddi_init(dev, PORT_C);
11849                 if (found & SFUSE_STRAP_DDID_DETECTED)
11850                         intel_ddi_init(dev, PORT_D);
11851         } else if (HAS_PCH_SPLIT(dev)) {
11852                 int found;
11853                 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
11854
11855                 if (has_edp_a(dev))
11856                         intel_dp_init(dev, DP_A, PORT_A);
11857
11858                 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
11859                         /* PCH SDVOB multiplex with HDMIB */
11860                         found = intel_sdvo_init(dev, PCH_SDVOB, true);
11861                         if (!found)
11862                                 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
11863                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
11864                                 intel_dp_init(dev, PCH_DP_B, PORT_B);
11865                 }
11866
11867                 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
11868                         intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
11869
11870                 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
11871                         intel_hdmi_init(dev, PCH_HDMID, PORT_D);
11872
11873                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
11874                         intel_dp_init(dev, PCH_DP_C, PORT_C);
11875
11876                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
11877                         intel_dp_init(dev, PCH_DP_D, PORT_D);
11878         } else if (IS_VALLEYVIEW(dev)) {
11879                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
11880                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
11881                                         PORT_B);
11882                         if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
11883                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
11884                 }
11885
11886                 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
11887                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
11888                                         PORT_C);
11889                         if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
11890                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
11891                 }
11892
11893                 if (IS_CHERRYVIEW(dev)) {
11894                         if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
11895                                 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
11896                                                 PORT_D);
11897                                 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
11898                                         intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
11899                         }
11900                 }
11901
11902                 intel_dsi_init(dev);
11903         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
11904                 bool found = false;
11905
11906                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11907                         DRM_DEBUG_KMS("probing SDVOB\n");
11908                         found = intel_sdvo_init(dev, GEN3_SDVOB, true);
11909                         if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
11910                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
11911                                 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
11912                         }
11913
11914                         if (!found && SUPPORTS_INTEGRATED_DP(dev))
11915                                 intel_dp_init(dev, DP_B, PORT_B);
11916                 }
11917
11918                 /* Before G4X, SDVOC doesn't have its own detect register */
11919
11920                 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
11921                         DRM_DEBUG_KMS("probing SDVOC\n");
11922                         found = intel_sdvo_init(dev, GEN3_SDVOC, false);
11923                 }
11924
11925                 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
11927                         if (SUPPORTS_INTEGRATED_HDMI(dev)) {
11928                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
11929                                 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
11930                         }
11931                         if (SUPPORTS_INTEGRATED_DP(dev))
11932                                 intel_dp_init(dev, DP_C, PORT_C);
11933                 }
11934
11935                 if (SUPPORTS_INTEGRATED_DP(dev) &&
11936                     (I915_READ(DP_D) & DP_DETECTED))
11937                         intel_dp_init(dev, DP_D, PORT_D);
11938         } else if (IS_GEN2(dev))
11939                 intel_dvo_init(dev);
11940
11941         if (SUPPORTS_TV(dev))
11942                 intel_tv_init(dev);
11943
11944         intel_edp_psr_init(dev);
11945
11946         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
11947                 encoder->base.possible_crtcs = encoder->crtc_mask;
11948                 encoder->base.possible_clones =
11949                         intel_encoder_clones(encoder);
11950         }
11951
11952         intel_init_pch_refclk(dev);
11953
11954         drm_helper_move_panel_connectors_to_head(dev);
11955 }
11956
11957 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11958 {
11959         struct drm_device *dev = fb->dev;
11960         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11961
11962         drm_framebuffer_cleanup(fb);
11963         mutex_lock(&dev->struct_mutex);
11964         WARN_ON(!intel_fb->obj->framebuffer_references--);
11965         drm_gem_object_unreference(&intel_fb->obj->base);
11966         mutex_unlock(&dev->struct_mutex);
11967         kfree(intel_fb);
11968 }
11969
11970 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11971                                                 struct drm_file *file,
11972                                                 unsigned int *handle)
11973 {
11974         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11975         struct drm_i915_gem_object *obj = intel_fb->obj;
11976
11977         return drm_gem_handle_create(file, &obj->base, handle);
11978 }
11979
11980 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11981         .destroy = intel_user_framebuffer_destroy,
11982         .create_handle = intel_user_framebuffer_create_handle,
11983 };
11984
11985 static int intel_framebuffer_init(struct drm_device *dev,
11986                                   struct intel_framebuffer *intel_fb,
11987                                   struct drm_mode_fb_cmd2 *mode_cmd,
11988                                   struct drm_i915_gem_object *obj)
11989 {
11990         int aligned_height;
11991         int pitch_limit;
11992         int ret;
11993
11994         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
11995
11996         if (obj->tiling_mode == I915_TILING_Y) {
11997                 DRM_DEBUG("hardware does not support tiling Y\n");
11998                 return -EINVAL;
11999         }
12000
12001         if (mode_cmd->pitches[0] & 63) {
12002                 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
12003                           mode_cmd->pitches[0]);
12004                 return -EINVAL;
12005         }
12006
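              /*
               * Per-platform pitch limits for the display engine; on gen3/4 a
               * tiled surface is limited to half the linear pitch.
               */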
12007         if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
12008                 pitch_limit = 32*1024;
12009         } else if (INTEL_INFO(dev)->gen >= 4) {
12010                 if (obj->tiling_mode)
12011                         pitch_limit = 16*1024;
12012                 else
12013                         pitch_limit = 32*1024;
12014         } else if (INTEL_INFO(dev)->gen >= 3) {
12015                 if (obj->tiling_mode)
12016                         pitch_limit = 8*1024;
12017                 else
12018                         pitch_limit = 16*1024;
12019         } else
12020                 /* XXX DSPC is limited to 4k tiled */
12021                 pitch_limit = 8*1024;
12022
12023         if (mode_cmd->pitches[0] > pitch_limit) {
12024                 DRM_DEBUG("%s pitch (%d) must be less than %d\n",
12025                           obj->tiling_mode ? "tiled" : "linear",
12026                           mode_cmd->pitches[0], pitch_limit);
12027                 return -EINVAL;
12028         }
12029
12030         if (obj->tiling_mode != I915_TILING_NONE &&
12031             mode_cmd->pitches[0] != obj->stride) {
12032                 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
12033                           mode_cmd->pitches[0], obj->stride);
12034                 return -EINVAL;
12035         }
12036
12037         /* Reject formats not supported by any plane early. */
12038         switch (mode_cmd->pixel_format) {
12039         case DRM_FORMAT_C8:
12040         case DRM_FORMAT_RGB565:
12041         case DRM_FORMAT_XRGB8888:
12042         case DRM_FORMAT_ARGB8888:
12043                 break;
12044         case DRM_FORMAT_XRGB1555:
12045         case DRM_FORMAT_ARGB1555:
12046                 if (INTEL_INFO(dev)->gen > 3) {
12047                         DRM_DEBUG("unsupported pixel format: %s\n",
12048                                   drm_get_format_name(mode_cmd->pixel_format));
12049                         return -EINVAL;
12050                 }
12051                 break;
12052         case DRM_FORMAT_XBGR8888:
12053         case DRM_FORMAT_ABGR8888:
12054         case DRM_FORMAT_XRGB2101010:
12055         case DRM_FORMAT_ARGB2101010:
12056         case DRM_FORMAT_XBGR2101010:
12057         case DRM_FORMAT_ABGR2101010:
12058                 if (INTEL_INFO(dev)->gen < 4) {
12059                         DRM_DEBUG("unsupported pixel format: %s\n",
12060                                   drm_get_format_name(mode_cmd->pixel_format));
12061                         return -EINVAL;
12062                 }
12063                 break;
12064         case DRM_FORMAT_YUYV:
12065         case DRM_FORMAT_UYVY:
12066         case DRM_FORMAT_YVYU:
12067         case DRM_FORMAT_VYUY:
12068                 if (INTEL_INFO(dev)->gen < 5) {
12069                         DRM_DEBUG("unsupported pixel format: %s\n",
12070                                   drm_get_format_name(mode_cmd->pixel_format));
12071                         return -EINVAL;
12072                 }
12073                 break;
12074         default:
12075                 DRM_DEBUG("unsupported pixel format: %s\n",
12076                           drm_get_format_name(mode_cmd->pixel_format));
12077                 return -EINVAL;
12078         }
12079
12080         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
12081         if (mode_cmd->offsets[0] != 0)
12082                 return -EINVAL;
12083
12084         aligned_height = intel_align_height(dev, mode_cmd->height,
12085                                             obj->tiling_mode);
12086         /* FIXME drm helper for size checks (especially planar formats)? */
12087         if (obj->base.size < aligned_height * mode_cmd->pitches[0])
12088                 return -EINVAL;
12089
12090         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
12091         intel_fb->obj = obj;
12092         intel_fb->obj->framebuffer_references++;
12093
12094         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
12095         if (ret) {
12096                 DRM_ERROR("framebuffer init failed %d\n", ret);
12097                 return ret;
12098         }
12099
12100         return 0;
12101 }
12102
12103 static struct drm_framebuffer *
12104 intel_user_framebuffer_create(struct drm_device *dev,
12105                               struct drm_file *filp,
12106                               struct drm_mode_fb_cmd2 *mode_cmd)
12107 {
12108         struct drm_i915_gem_object *obj;
12109
12110         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
12111                                                 mode_cmd->handles[0]));
12112         if (&obj->base == NULL)
12113                 return ERR_PTR(-ENOENT);
12114
12115         return intel_framebuffer_create(dev, mode_cmd, obj);
12116 }
12117
12118 #ifndef CONFIG_DRM_I915_FBDEV
12119 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
12120 {
12121 }
12122 #endif
12123
12124 static const struct drm_mode_config_funcs intel_mode_funcs = {
12125         .fb_create = intel_user_framebuffer_create,
12126         .output_poll_changed = intel_fbdev_output_poll_changed,
12127 };
12128
12129 /* Set up chip specific display functions */
12130 static void intel_init_display(struct drm_device *dev)
12131 {
12132         struct drm_i915_private *dev_priv = dev->dev_private;
12133
12134         if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
12135                 dev_priv->display.find_dpll = g4x_find_best_dpll;
12136         else if (IS_CHERRYVIEW(dev))
12137                 dev_priv->display.find_dpll = chv_find_best_dpll;
12138         else if (IS_VALLEYVIEW(dev))
12139                 dev_priv->display.find_dpll = vlv_find_best_dpll;
12140         else if (IS_PINEVIEW(dev))
12141                 dev_priv->display.find_dpll = pnv_find_best_dpll;
12142         else
12143                 dev_priv->display.find_dpll = i9xx_find_best_dpll;
12144
12145         if (HAS_DDI(dev)) {
12146                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
12147                 dev_priv->display.get_plane_config = ironlake_get_plane_config;
12148                 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
12149                 dev_priv->display.crtc_enable = haswell_crtc_enable;
12150                 dev_priv->display.crtc_disable = haswell_crtc_disable;
12151                 dev_priv->display.off = haswell_crtc_off;
12152                 dev_priv->display.update_primary_plane =
12153                         ironlake_update_primary_plane;
12154         } else if (HAS_PCH_SPLIT(dev)) {
12155                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12156                 dev_priv->display.get_plane_config = ironlake_get_plane_config;
12157                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
12158                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
12159                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
12160                 dev_priv->display.off = ironlake_crtc_off;
12161                 dev_priv->display.update_primary_plane =
12162                         ironlake_update_primary_plane;
12163         } else if (IS_VALLEYVIEW(dev)) {
12164                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12165                 dev_priv->display.get_plane_config = i9xx_get_plane_config;
12166                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12167                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12168                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12169                 dev_priv->display.off = i9xx_crtc_off;
12170                 dev_priv->display.update_primary_plane =
12171                         i9xx_update_primary_plane;
12172         } else {
12173                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12174                 dev_priv->display.get_plane_config = i9xx_get_plane_config;
12175                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
12176                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12177                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12178                 dev_priv->display.off = i9xx_crtc_off;
12179                 dev_priv->display.update_primary_plane =
12180                         i9xx_update_primary_plane;
12181         }
12182
12183         /* Returns the core display clock speed */
12184         if (IS_VALLEYVIEW(dev))
12185                 dev_priv->display.get_display_clock_speed =
12186                         valleyview_get_display_clock_speed;
12187         else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
12188                 dev_priv->display.get_display_clock_speed =
12189                         i945_get_display_clock_speed;
12190         else if (IS_I915G(dev))
12191                 dev_priv->display.get_display_clock_speed =
12192                         i915_get_display_clock_speed;
12193         else if (IS_I945GM(dev) || IS_845G(dev))
12194                 dev_priv->display.get_display_clock_speed =
12195                         i9xx_misc_get_display_clock_speed;
12196         else if (IS_PINEVIEW(dev))
12197                 dev_priv->display.get_display_clock_speed =
12198                         pnv_get_display_clock_speed;
12199         else if (IS_I915GM(dev))
12200                 dev_priv->display.get_display_clock_speed =
12201                         i915gm_get_display_clock_speed;
12202         else if (IS_I865G(dev))
12203                 dev_priv->display.get_display_clock_speed =
12204                         i865_get_display_clock_speed;
12205         else if (IS_I85X(dev))
12206                 dev_priv->display.get_display_clock_speed =
12207                         i855_get_display_clock_speed;
12208         else /* 852, 830 */
12209                 dev_priv->display.get_display_clock_speed =
12210                         i830_get_display_clock_speed;
12211
12212         if (HAS_PCH_SPLIT(dev)) {
12213                 if (IS_GEN5(dev)) {
12214                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12215                         dev_priv->display.write_eld = ironlake_write_eld;
12216                 } else if (IS_GEN6(dev)) {
12217                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12218                         dev_priv->display.write_eld = ironlake_write_eld;
12219                         dev_priv->display.modeset_global_resources =
12220                                 snb_modeset_global_resources;
12221                 } else if (IS_IVYBRIDGE(dev)) {
12222                         /* FIXME: detect B0+ stepping and use auto training */
12223                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12224                         dev_priv->display.write_eld = ironlake_write_eld;
12225                         dev_priv->display.modeset_global_resources =
12226                                 ivb_modeset_global_resources;
12227                 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12228                         dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12229                         dev_priv->display.write_eld = haswell_write_eld;
12230                         dev_priv->display.modeset_global_resources =
12231                                 haswell_modeset_global_resources;
12232                 }
12233         } else if (IS_G4X(dev)) {
12234                 dev_priv->display.write_eld = g4x_write_eld;
12235         } else if (IS_VALLEYVIEW(dev)) {
12236                 dev_priv->display.modeset_global_resources =
12237                         valleyview_modeset_global_resources;
12238                 dev_priv->display.write_eld = ironlake_write_eld;
12239         }
12240
12241         /* Default just returns -ENODEV to indicate unsupported */
12242         dev_priv->display.queue_flip = intel_default_queue_flip;
12243
12244         switch (INTEL_INFO(dev)->gen) {
12245         case 2:
12246                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
12247                 break;
12248
12249         case 3:
12250                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
12251                 break;
12252
12253         case 4:
12254         case 5:
12255                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
12256                 break;
12257
12258         case 6:
12259                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
12260                 break;
12261         case 7:
12262         case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
12263                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
12264                 break;
12265         }
12266
12267         intel_panel_init_backlight_funcs(dev);
12268 }
12269
12270 /*
12271  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
12272  * resume, or other times.  This quirk makes sure that's the case for
12273  * affected systems.
12274  */
12275 static void quirk_pipea_force(struct drm_device *dev)
12276 {
12277         struct drm_i915_private *dev_priv = dev->dev_private;
12278
12279         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
12280         DRM_INFO("applying pipe a force quirk\n");
12281 }
12282
12283 /*
12284  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12285  */
12286 static void quirk_ssc_force_disable(struct drm_device *dev)
12287 {
12288         struct drm_i915_private *dev_priv = dev->dev_private;
12289         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
12290         DRM_INFO("applying lvds SSC disable quirk\n");
12291 }
12292
12293 /*
12294  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
12295  * brightness value
12296  */
12297 static void quirk_invert_brightness(struct drm_device *dev)
12298 {
12299         struct drm_i915_private *dev_priv = dev->dev_private;
12300         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
12301         DRM_INFO("applying inverted panel brightness quirk\n");
12302 }
12303
12304 struct intel_quirk {
12305         int device;
12306         int subsystem_vendor;
12307         int subsystem_device;
12308         void (*hook)(struct drm_device *dev);
12309 };
12310
12311 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
12312 struct intel_dmi_quirk {
12313         void (*hook)(struct drm_device *dev);
12314         const struct dmi_system_id (*dmi_id_list)[];
12315 };
12316
12317 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
12318 {
12319         DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
12320         return 1;
12321 }
12322
12323 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
12324         {
12325                 .dmi_id_list = &(const struct dmi_system_id[]) {
12326                         {
12327                                 .callback = intel_dmi_reverse_brightness,
12328                                 .ident = "NCR Corporation",
12329                                 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
12330                                             DMI_MATCH(DMI_PRODUCT_NAME, ""),
12331                                 },
12332                         },
12333                         { }  /* terminating entry */
12334                 },
12335                 .hook = quirk_invert_brightness,
12336         },
12337 };
12338
12339 static struct intel_quirk intel_quirks[] = {
12340         /* HP Mini needs pipe A force quirk (LP: #322104) */
12341         { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
12342
12343         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
12344         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
12345
12346         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12347         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12348
12349         /* Lenovo U160 cannot use SSC on LVDS */
12350         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12351
12352         /* Sony Vaio Y cannot use SSC on LVDS */
12353         { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
12354
12355         /* Acer Aspire 5734Z must invert backlight brightness */
12356         { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
12357
12358         /* Acer/eMachines G725 */
12359         { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
12360
12361         /* Acer/eMachines e725 */
12362         { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
12363
12364         /* Acer/Packard Bell NCL20 */
12365         { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
12366
12367         /* Acer Aspire 4736Z */
12368         { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
12369
12370         /* Acer Aspire 5336 */
12371         { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
12372 };
12373
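/*
 * A quirk entry matches when the PCI device ID matches exactly and the
 * subsystem vendor/device IDs either match or are PCI_ANY_ID wildcards,
 * see intel_init_quirks() below.
 */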
12374 static void intel_init_quirks(struct drm_device *dev)
12375 {
12376         struct pci_dev *d = dev->pdev;
12377         int i;
12378
12379         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
12380                 struct intel_quirk *q = &intel_quirks[i];
12381
12382                 if (d->device == q->device &&
12383                     (d->subsystem_vendor == q->subsystem_vendor ||
12384                      q->subsystem_vendor == PCI_ANY_ID) &&
12385                     (d->subsystem_device == q->subsystem_device ||
12386                      q->subsystem_device == PCI_ANY_ID))
12387                         q->hook(dev);
12388         }
12389         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
12390                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
12391                         intel_dmi_quirks[i].hook(dev);
12392         }
12393 }
12394
12395 /* Disable the VGA plane that we never use */
12396 static void i915_disable_vga(struct drm_device *dev)
12397 {
12398         struct drm_i915_private *dev_priv = dev->dev_private;
12399         u8 sr1;
12400         u32 vga_reg = i915_vgacntrl_reg(dev);
12401
12402         /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
12403         vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
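        /*
         * SR01 is the VGA sequencer clocking mode register; setting bit 5
         * blanks the screen before the VGA plane itself is disabled below.
         */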
12404         outb(SR01, VGA_SR_INDEX);
12405         sr1 = inb(VGA_SR_DATA);
12406         outb(sr1 | 1<<5, VGA_SR_DATA);
12407         vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12408         udelay(300);
12409
12410         I915_WRITE(vga_reg, VGA_DISP_DISABLE);
12411         POSTING_READ(vga_reg);
12412 }
12413
12414 void intel_modeset_init_hw(struct drm_device *dev)
12415 {
12416         intel_prepare_ddi(dev);
12417
12418         if (IS_VALLEYVIEW(dev))
12419                 vlv_update_cdclk(dev);
12420
12421         intel_init_clock_gating(dev);
12422
12423         intel_reset_dpio(dev);
12424
12425         intel_enable_gt_powersave(dev);
12426 }
12427
12428 void intel_modeset_suspend_hw(struct drm_device *dev)
12429 {
12430         intel_suspend_hw(dev);
12431 }
12432
12433 void intel_modeset_init(struct drm_device *dev)
12434 {
12435         struct drm_i915_private *dev_priv = dev->dev_private;
12436         int sprite, ret;
12437         enum pipe pipe;
12438         struct intel_crtc *crtc;
12439
12440         drm_mode_config_init(dev);
12441
12442         dev->mode_config.min_width = 0;
12443         dev->mode_config.min_height = 0;
12444
12445         dev->mode_config.preferred_depth = 24;
12446         dev->mode_config.prefer_shadow = 1;
12447
12448         dev->mode_config.funcs = &intel_mode_funcs;
12449
12450         intel_init_quirks(dev);
12451
12452         intel_init_pm(dev);
12453
12454         if (INTEL_INFO(dev)->num_pipes == 0)
12455                 return;
12456
12457         intel_init_display(dev);
12458
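        /*
         * Maximum framebuffer size we accept; the limit grows with the
         * hardware generation.
         */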
12459         if (IS_GEN2(dev)) {
12460                 dev->mode_config.max_width = 2048;
12461                 dev->mode_config.max_height = 2048;
12462         } else if (IS_GEN3(dev)) {
12463                 dev->mode_config.max_width = 4096;
12464                 dev->mode_config.max_height = 4096;
12465         } else {
12466                 dev->mode_config.max_width = 8192;
12467                 dev->mode_config.max_height = 8192;
12468         }
12469
12470         if (IS_GEN2(dev)) {
12471                 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12472                 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12473         } else {
12474                 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
12475                 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
12476         }
12477
12478         dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
12479
12480         DRM_DEBUG_KMS("%d display pipe%s available.\n",
12481                       INTEL_INFO(dev)->num_pipes,
12482                       INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12483
12484         for_each_pipe(pipe) {
12485                 intel_crtc_init(dev, pipe);
12486                 for_each_sprite(pipe, sprite) {
12487                         ret = intel_plane_init(dev, pipe, sprite);
12488                         if (ret)
12489                                 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
12490                                               pipe_name(pipe), sprite_name(pipe, sprite), ret);
12491                 }
12492         }
12493
12494         intel_init_dpio(dev);
12495         intel_reset_dpio(dev);
12496
12497         intel_cpu_pll_init(dev);
12498         intel_shared_dpll_init(dev);
12499
12500         /* Just disable it once at startup */
12501         i915_disable_vga(dev);
12502         intel_setup_outputs(dev);
12503
12504         /* Just in case the BIOS is doing something questionable. */
12505         intel_disable_fbc(dev);
12506
12507         drm_modeset_lock_all(dev);
12508         intel_modeset_setup_hw_state(dev, false);
12509         drm_modeset_unlock_all(dev);
12510
12511         for_each_intel_crtc(dev, crtc) {
12512                 if (!crtc->active)
12513                         continue;
12514
12515                 /*
12516                  * Note that reserving the BIOS fb up front prevents us
12517                  * from stuffing other stolen allocations like the ring
12518                  * on top.  This prevents some ugliness at boot time, and
12519                  * can even allow for smooth boot transitions if the BIOS
12520                  * fb is large enough for the active pipe configuration.
12521                  */
12522                 if (dev_priv->display.get_plane_config) {
12523                         dev_priv->display.get_plane_config(crtc,
12524                                                            &crtc->plane_config);
12525                         /*
12526                          * If the fb is shared between multiple heads, we'll
12527                          * just get the first one.
12528                          */
12529                         intel_find_plane_obj(crtc, &crtc->plane_config);
12530                 }
12531         }
12532 }
12533
12534 static void intel_enable_pipe_a(struct drm_device *dev)
12535 {
12536         struct intel_connector *connector;
12537         struct drm_connector *crt = NULL;
12538         struct intel_load_detect_pipe load_detect_temp;
12539         struct drm_modeset_acquire_ctx ctx;
12540
12541         /* We can't just switch on pipe A; we need to set things up with a
12542          * proper mode and output configuration. As a gross hack, enable pipe A
12543          * by enabling the load detect pipe once. */
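        /* Look for the analog (CRT) connector to drive the load detect pipe. */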
12544         list_for_each_entry(connector,
12545                             &dev->mode_config.connector_list,
12546                             base.head) {
12547                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
12548                         crt = &connector->base;
12549                         break;
12550                 }
12551         }
12552
12553         if (!crt)
12554                 return;
12555
12556         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
12557                 intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
12558
12559
12560 }
12561
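/*
 * On gen2/3 the primary planes can be assigned to either pipe, so check the
 * other plane's DSPCNTR to make sure it isn't enabled and pointed at our
 * pipe.
 */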
12562 static bool
12563 intel_check_plane_mapping(struct intel_crtc *crtc)
12564 {
12565         struct drm_device *dev = crtc->base.dev;
12566         struct drm_i915_private *dev_priv = dev->dev_private;
12567         u32 reg, val;
12568
12569         if (INTEL_INFO(dev)->num_pipes == 1)
12570                 return true;
12571
12572         reg = DSPCNTR(!crtc->plane);
12573         val = I915_READ(reg);
12574
12575         if ((val & DISPLAY_PLANE_ENABLE) &&
12576             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
12577                 return false;
12578
12579         return true;
12580 }
12581
12582 static void intel_sanitize_crtc(struct intel_crtc *crtc)
12583 {
12584         struct drm_device *dev = crtc->base.dev;
12585         struct drm_i915_private *dev_priv = dev->dev_private;
12586         u32 reg;
12587
12588         /* Clear any frame start delays (used for debugging) that the BIOS left behind */
12589         reg = PIPECONF(crtc->config.cpu_transcoder);
12590         I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12591
12592         /* restore vblank interrupts to correct state */
12593         if (crtc->active)
12594                 drm_vblank_on(dev, crtc->pipe);
12595         else
12596                 drm_vblank_off(dev, crtc->pipe);
12597
12598         /* We need to sanitize the plane -> pipe mapping first because this will
12599          * disable the crtc (and hence change the state) if it is wrong. Note
12600          * that gen4+ has a fixed plane -> pipe mapping.  */
12601         if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
12602                 struct intel_connector *connector;
12603                 bool plane;
12604
12605                 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
12606                               crtc->base.base.id);
12607
12608                 /* Pipe has the wrong plane attached and the plane is active.
12609                  * Temporarily change the plane mapping and disable everything
12610                  * ...  */
12611                 plane = crtc->plane;
12612                 crtc->plane = !plane;
12613                 dev_priv->display.crtc_disable(&crtc->base);
12614                 crtc->plane = plane;
12615
12616                 /* ... and break all links. */
12617                 list_for_each_entry(connector, &dev->mode_config.connector_list,
12618                                     base.head) {
12619                         if (connector->encoder->base.crtc != &crtc->base)
12620                                 continue;
12621
12622                         connector->base.dpms = DRM_MODE_DPMS_OFF;
12623                         connector->base.encoder = NULL;
12624                 }
12625                 /* Multiple connectors may share the same encoder:
12626                  * handle them and break the crtc link separately. */
12627                 list_for_each_entry(connector, &dev->mode_config.connector_list,
12628                                     base.head)
12629                         if (connector->encoder->base.crtc == &crtc->base) {
12630                                 connector->encoder->base.crtc = NULL;
12631                                 connector->encoder->connectors_active = false;
12632                         }
12633
12634                 WARN_ON(crtc->active);
12635                 crtc->base.enabled = false;
12636         }
12637
12638         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
12639             crtc->pipe == PIPE_A && !crtc->active) {
12640                 /* BIOS forgot to enable pipe A; this mostly happens after
12641                  * resume. Force-enable the pipe to fix this; in the update_dpms
12642                  * call below we restore the pipe to the right state, but leave
12643                  * the required bits on. */
12644                 intel_enable_pipe_a(dev);
12645         }
12646
12647         /* Adjust the state of the output pipe according to whether we
12648          * have active connectors/encoders. */
12649         intel_crtc_update_dpms(&crtc->base);
12650
12651         if (crtc->active != crtc->base.enabled) {
12652                 struct intel_encoder *encoder;
12653
12654                 /* This can happen either due to bugs in the get_hw_state
12655                  * functions or because the pipe is force-enabled due to the
12656                  * pipe A quirk. */
12657                 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
12658                               crtc->base.base.id,
12659                               crtc->base.enabled ? "enabled" : "disabled",
12660                               crtc->active ? "enabled" : "disabled");
12661
12662                 crtc->base.enabled = crtc->active;
12663
12664                 /* Because we only establish the connector -> encoder ->
12665                  * crtc links if something is active, this means the
12666                  * crtc is now deactivated. Break the links. Connector
12667                  * -> encoder links are only established when things are
12668                  * actually up, hence there is no need to break them. */
12669                 WARN_ON(crtc->active);
12670
12671                 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
12672                         WARN_ON(encoder->connectors_active);
12673                         encoder->base.crtc = NULL;
12674                 }
12675         }
12676
12677         if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
12678                 /*
12679                  * We start out with underrun reporting disabled to avoid races.
12680                  * For correct bookkeeping mark this on active crtcs.
12681                  *
12682                  * Also, on gmch platforms we don't have any hardware bits to
12683                  * disable the underrun reporting. This means we need to start
12684                  * out with underrun reporting disabled also on inactive pipes,
12685                  * since otherwise we'll complain about the garbage we read when
12686                  * e.g. coming up after runtime pm.
12687                  *
12688                  * No protection against concurrent access is required - at
12689                  * worst a fifo underrun happens which also sets this to false.
12690                  */
12691                 crtc->cpu_fifo_underrun_disabled = true;
12692                 crtc->pch_fifo_underrun_disabled = true;
12693
12694                 update_scanline_offset(crtc);
12695         }
12696 }
12697
12698 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12699 {
12700         struct intel_connector *connector;
12701         struct drm_device *dev = encoder->base.dev;
12702
12703         /* We need to check both for a crtc link (meaning that the
12704          * encoder is active and trying to read from a pipe) and the
12705          * pipe itself being active. */
12706         bool has_active_crtc = encoder->base.crtc &&
12707                 to_intel_crtc(encoder->base.crtc)->active;
12708
12709         if (encoder->connectors_active && !has_active_crtc) {
12710                 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12711                               encoder->base.base.id,
12712                               encoder->base.name);
12713
12714                 /* Connector is active, but has no active pipe. This is
12715                  * fallout from our resume register restoring. Disable
12716                  * the encoder manually again. */
12717                 if (encoder->base.crtc) {
12718                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
12719                                       encoder->base.base.id,
12720                                       encoder->base.name);
12721                         encoder->disable(encoder);
12722                 }
12723                 encoder->base.crtc = NULL;
12724                 encoder->connectors_active = false;
12725
12726                 /* Inconsistent output/port/pipe state happens presumably due to
12727                  * a bug in one of the get_hw_state functions, or someplace else
12728                  * in our code, like the register restore mess on resume. Clamp
12729                  * things to off as a safer default. */
12730                 list_for_each_entry(connector,
12731                                     &dev->mode_config.connector_list,
12732                                     base.head) {
12733                         if (connector->encoder != encoder)
12734                                 continue;
12735                         connector->base.dpms = DRM_MODE_DPMS_OFF;
12736                         connector->base.encoder = NULL;
12737                 }
12738         }
12739         /* Enabled encoders without active connectors will be fixed in
12740          * the crtc fixup. */
12741 }
12742
12743 void i915_redisable_vga_power_on(struct drm_device *dev)
12744 {
12745         struct drm_i915_private *dev_priv = dev->dev_private;
12746         u32 vga_reg = i915_vgacntrl_reg(dev);
12747
12748         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
12749                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
12750                 i915_disable_vga(dev);
12751         }
12752 }
12753
12754 void i915_redisable_vga(struct drm_device *dev)
12755 {
12756         struct drm_i915_private *dev_priv = dev->dev_private;
12757
12758         /* This function can be called either from intel_modeset_setup_hw_state or
12759          * at a very early point in our resume sequence, where the power well
12760          * structures are not yet restored. Since this function is at a very
12761          * paranoid "someone might have enabled VGA while we were not looking"
12762          * level, just check if the power well is enabled instead of trying to
12763          * follow the "don't touch the power well if we don't need it" policy
12764          * the rest of the driver uses. */
12765         if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
12766                 return;
12767
12768         i915_redisable_vga_power_on(dev);
12769 }
12770
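/* Read out whether the crtc's primary plane is currently enabled in hardware. */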
12771 static bool primary_get_hw_state(struct intel_crtc *crtc)
12772 {
12773         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
12774
12775         if (!crtc->active)
12776                 return false;
12777
12778         return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
12779 }
12780
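/*
 * Read the current hardware state into our software tracking: the per-crtc
 * pipe config, the shared DPLLs, the encoder -> pipe links and the
 * connector -> encoder links.
 */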
12781 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12782 {
12783         struct drm_i915_private *dev_priv = dev->dev_private;
12784         enum pipe pipe;
12785         struct intel_crtc *crtc;
12786         struct intel_encoder *encoder;
12787         struct intel_connector *connector;
12788         int i;
12789
12790         for_each_intel_crtc(dev, crtc) {
12791                 memset(&crtc->config, 0, sizeof(crtc->config));
12792
12793                 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
12794
12795                 crtc->active = dev_priv->display.get_pipe_config(crtc,
12796                                                                  &crtc->config);
12797
12798                 crtc->base.enabled = crtc->active;
12799                 crtc->primary_enabled = primary_get_hw_state(crtc);
12800
12801                 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
12802                               crtc->base.base.id,
12803                               crtc->active ? "enabled" : "disabled");
12804         }
12805
12806         /* FIXME: Smash this into the new shared dpll infrastructure. */
12807         if (HAS_DDI(dev))
12808                 intel_ddi_setup_hw_pll_state(dev);
12809
12810         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12811                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12812
12813                 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
12814                 pll->active = 0;
12815                 for_each_intel_crtc(dev, crtc) {
12816                         if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
12817                                 pll->active++;
12818                 }
12819                 pll->refcount = pll->active;
12820
12821                 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
12822                               pll->name, pll->refcount, pll->on);
12823         }
12824
12825         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12826                             base.head) {
12827                 pipe = 0;
12828
12829                 if (encoder->get_hw_state(encoder, &pipe)) {
12830                         crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12831                         encoder->base.crtc = &crtc->base;
12832                         encoder->get_config(encoder, &crtc->config);
12833                 } else {
12834                         encoder->base.crtc = NULL;
12835                 }
12836
12837                 encoder->connectors_active = false;
12838                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12839                               encoder->base.base.id,
12840                               encoder->base.name,
12841                               encoder->base.crtc ? "enabled" : "disabled",
12842                               pipe_name(pipe));
12843         }
12844
12845         list_for_each_entry(connector, &dev->mode_config.connector_list,
12846                             base.head) {
12847                 if (connector->get_hw_state(connector)) {
12848                         connector->base.dpms = DRM_MODE_DPMS_ON;
12849                         connector->encoder->connectors_active = true;
12850                         connector->base.encoder = &connector->encoder->base;
12851                 } else {
12852                         connector->base.dpms = DRM_MODE_DPMS_OFF;
12853                         connector->base.encoder = NULL;
12854                 }
12855                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
12856                               connector->base.base.id,
12857                               connector->base.name,
12858                               connector->base.encoder ? "enabled" : "disabled");
12859         }
12860 }
12861
12862 /* Scan out the current hw modeset state, sanitize it and map it into the drm
12863  * and i915 state tracking structures. */
12864 void intel_modeset_setup_hw_state(struct drm_device *dev,
12865                                   bool force_restore)
12866 {
12867         struct drm_i915_private *dev_priv = dev->dev_private;
12868         enum pipe pipe;
12869         struct intel_crtc *crtc;
12870         struct intel_encoder *encoder;
12871         int i;
12872
12873         intel_modeset_readout_hw_state(dev);
12874
12875         /*
12876          * Now that we have the config, copy it to each CRTC struct.
12877          * Note that this could go away if we move to using crtc_config
12878          * checking everywhere.
12879          */
12880         for_each_intel_crtc(dev, crtc) {
12881                 if (crtc->active && i915.fastboot) {
12882                         intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
12883                         DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
12884                                       crtc->base.base.id);
12885                         drm_mode_debug_printmodeline(&crtc->base.mode);
12886                 }
12887         }
12888
12889         /* HW state is read out, now we need to sanitize this mess. */
12890         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
12891                             base.head) {
12892                 intel_sanitize_encoder(encoder);
12893         }
12894
12895         for_each_pipe(pipe) {
12896                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
12897                 intel_sanitize_crtc(crtc);
12898                 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
12899         }
12900
12901         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
12902                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
12903
12904                 if (!pll->on || pll->active)
12905                         continue;
12906
12907                 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
12908
12909                 pll->disable(dev_priv, pll);
12910                 pll->on = false;
12911         }
12912
12913         if (HAS_PCH_SPLIT(dev))
12914                 ilk_wm_get_hw_state(dev);
12915
12916         if (force_restore) {
12917                 i915_redisable_vga(dev);
12918
12919                 /*
12920                  * We need to use raw interfaces for restoring state to avoid
12921                  * checking (bogus) intermediate states.
12922                  */
12923                 for_each_pipe(pipe) {
12924                         struct drm_crtc *crtc =
12925                                 dev_priv->pipe_to_crtc_mapping[pipe];
12926
12927                         __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
12928                                          crtc->primary->fb);
12929                 }
12930         } else {
12931                 intel_modeset_update_staged_output_state(dev);
12932         }
12933
12934         intel_modeset_check_state(dev);
12935 }
12936
12937 void intel_modeset_gem_init(struct drm_device *dev)
12938 {
12939         struct drm_crtc *c;
12940         struct intel_framebuffer *fb;
12941
12942         mutex_lock(&dev->struct_mutex);
12943         intel_init_gt_powersave(dev);
12944         mutex_unlock(&dev->struct_mutex);
12945
12946         intel_modeset_init_hw(dev);
12947
12948         intel_setup_overlay(dev);
12949
12950         /*
12951          * Make sure any fbs we allocated at startup are properly
12952          * pinned & fenced.  When we do the allocation it's too early
12953          * for this.
12954          */
12955         mutex_lock(&dev->struct_mutex);
12956         for_each_crtc(dev, c) {
12957                 if (!c->primary->fb)
12958                         continue;
12959
12960                 fb = to_intel_framebuffer(c->primary->fb);
12961                 if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
12962                         DRM_ERROR("failed to pin boot fb on pipe %d\n",
12963                                   to_intel_crtc(c)->pipe);
12964                         drm_framebuffer_unreference(c->primary->fb);
12965                         c->primary->fb = NULL;
12966                 }
12967         }
12968         mutex_unlock(&dev->struct_mutex);
12969 }
12970
12971 void intel_connector_unregister(struct intel_connector *intel_connector)
12972 {
12973         struct drm_connector *connector = &intel_connector->base;
12974
12975         intel_panel_destroy_backlight(connector);
12976         drm_sysfs_connector_remove(connector);
12977 }
12978
12979 void intel_modeset_cleanup(struct drm_device *dev)
12980 {
12981         struct drm_i915_private *dev_priv = dev->dev_private;
12982         struct drm_connector *connector;
12983
12984         /*
12985          * Disable interrupts and polling first to avoid creating havoc.
12986          * Too much stuff here (turning off rps, connectors, ...) would
12987          * experience fancy races otherwise.
12988          */
12989         drm_irq_uninstall(dev);
12990         cancel_work_sync(&dev_priv->hotplug_work);
12991         /*
12992          * Due to the hpd irq storm handling the hotplug work can re-arm the
12993          * poll handlers. Hence disable polling after hpd handling is shut down.
12994          */
12995         drm_kms_helper_poll_fini(dev);
12996
12997         mutex_lock(&dev->struct_mutex);
12998
12999         intel_unregister_dsm_handler();
13000
13001         intel_disable_fbc(dev);
13002
13003         intel_disable_gt_powersave(dev);
13004
13005         ironlake_teardown_rc6(dev);
13006
13007         mutex_unlock(&dev->struct_mutex);
13008
13009         /* flush any delayed tasks or pending work */
13010         flush_scheduled_work();
13011
13012         /* destroy the backlight and sysfs files before encoders/connectors */
13013         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
13014                 struct intel_connector *intel_connector;
13015
13016                 intel_connector = to_intel_connector(connector);
13017                 intel_connector->unregister(intel_connector);
13018         }
13019
13020         drm_mode_config_cleanup(dev);
13021
13022         intel_cleanup_overlay(dev);
13023
13024         mutex_lock(&dev->struct_mutex);
13025         intel_cleanup_gt_powersave(dev);
13026         mutex_unlock(&dev->struct_mutex);
13027 }
13028
13029 /*
13030  * Return which encoder is currently attached to the connector.
13031  */
13032 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
13033 {
13034         return &intel_attached_encoder(connector)->base;
13035 }
13036
13037 void intel_connector_attach_encoder(struct intel_connector *connector,
13038                                     struct intel_encoder *encoder)
13039 {
13040         connector->encoder = encoder;
13041         drm_mode_connector_attach_encoder(&connector->base,
13042                                           &encoder->base);
13043 }
13044
13045 /*
13046  * set vga decode state - true == enable VGA decode
13047  */
13048 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
13049 {
13050         struct drm_i915_private *dev_priv = dev->dev_private;
13051         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
13052         u16 gmch_ctrl;
13053
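        /*
         * Legacy VGA decode is gated by a bit in the GMCH control register,
         * which lives in the bridge device's PCI config space.
         */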
13054         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
13055                 DRM_ERROR("failed to read control word\n");
13056                 return -EIO;
13057         }
13058
13059         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
13060                 return 0;
13061
13062         if (state)
13063                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
13064         else
13065                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
13066
13067         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
13068                 DRM_ERROR("failed to write control word\n");
13069                 return -EIO;
13070         }
13071
13072         return 0;
13073 }
13074
13075 struct intel_display_error_state {
13076
13077         u32 power_well_driver;
13078
13079         int num_transcoders;
13080
13081         struct intel_cursor_error_state {
13082                 u32 control;
13083                 u32 position;
13084                 u32 base;
13085                 u32 size;
13086         } cursor[I915_MAX_PIPES];
13087
13088         struct intel_pipe_error_state {
13089                 bool power_domain_on;
13090                 u32 source;
13091                 u32 stat;
13092         } pipe[I915_MAX_PIPES];
13093
13094         struct intel_plane_error_state {
13095                 u32 control;
13096                 u32 stride;
13097                 u32 size;
13098                 u32 pos;
13099                 u32 addr;
13100                 u32 surface;
13101                 u32 tile_offset;
13102         } plane[I915_MAX_PIPES];
13103
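        /* Room for the three pipe transcoders plus the eDP transcoder on DDI platforms. */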
13104         struct intel_transcoder_error_state {
13105                 bool power_domain_on;
13106                 enum transcoder cpu_transcoder;
13107
13108                 u32 conf;
13109
13110                 u32 htotal;
13111                 u32 hblank;
13112                 u32 hsync;
13113                 u32 vtotal;
13114                 u32 vblank;
13115                 u32 vsync;
13116         } transcoder[4];
13117 };
13118
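/*
 * Capture the display state for the error state dump. GFP_ATOMIC is used
 * since this can presumably be called from atomic error-handling paths;
 * registers in powered-down domains are skipped.
 */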
13119 struct intel_display_error_state *
13120 intel_display_capture_error_state(struct drm_device *dev)
13121 {
13122         struct drm_i915_private *dev_priv = dev->dev_private;
13123         struct intel_display_error_state *error;
13124         int transcoders[] = {
13125                 TRANSCODER_A,
13126                 TRANSCODER_B,
13127                 TRANSCODER_C,
13128                 TRANSCODER_EDP,
13129         };
13130         int i;
13131
13132         if (INTEL_INFO(dev)->num_pipes == 0)
13133                 return NULL;
13134
13135         error = kzalloc(sizeof(*error), GFP_ATOMIC);
13136         if (error == NULL)
13137                 return NULL;
13138
13139         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13140                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13141
13142         for_each_pipe(i) {
13143                 error->pipe[i].power_domain_on =
13144                         intel_display_power_enabled_unlocked(dev_priv,
13145                                                            POWER_DOMAIN_PIPE(i));
13146                 if (!error->pipe[i].power_domain_on)
13147                         continue;
13148
13149                 error->cursor[i].control = I915_READ(CURCNTR(i));
13150                 error->cursor[i].position = I915_READ(CURPOS(i));
13151                 error->cursor[i].base = I915_READ(CURBASE(i));
13152
13153                 error->plane[i].control = I915_READ(DSPCNTR(i));
13154                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
13155                 if (INTEL_INFO(dev)->gen <= 3) {
13156                         error->plane[i].size = I915_READ(DSPSIZE(i));
13157                         error->plane[i].pos = I915_READ(DSPPOS(i));
13158                 }
13159                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13160                         error->plane[i].addr = I915_READ(DSPADDR(i));
13161                 if (INTEL_INFO(dev)->gen >= 4) {
13162                         error->plane[i].surface = I915_READ(DSPSURF(i));
13163                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
13164                 }
13165
13166                 error->pipe[i].source = I915_READ(PIPESRC(i));
13167
13168                 if (!HAS_PCH_SPLIT(dev))
13169                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
13170         }
13171
13172         error->num_transcoders = INTEL_INFO(dev)->num_pipes;
13173         if (HAS_DDI(dev_priv->dev))
13174                 error->num_transcoders++; /* Account for eDP. */
13175
13176         for (i = 0; i < error->num_transcoders; i++) {
13177                 enum transcoder cpu_transcoder = transcoders[i];
13178
13179                 error->transcoder[i].power_domain_on =
13180                         intel_display_power_enabled_unlocked(dev_priv,
13181                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13182                 if (!error->transcoder[i].power_domain_on)
13183                         continue;
13184
13185                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
13186
13187                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
13188                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
13189                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
13190                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
13191                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
13192                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
13193                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
13194         }
13195
13196         return error;
13197 }
13198
13199 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13200
13201 void
13202 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13203                                 struct drm_device *dev,
13204                                 struct intel_display_error_state *error)
13205 {
13206         int i;
13207
13208         if (!error)
13209                 return;
13210
13211         err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
13212         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13213                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
13214                            error->power_well_driver);
13215         for_each_pipe(i) {
13216                 err_printf(m, "Pipe [%d]:\n", i);
13217                 err_printf(m, "  Power: %s\n",
13218                            error->pipe[i].power_domain_on ? "on" : "off");
13219                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
13220                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
13221
13222                 err_printf(m, "Plane [%d]:\n", i);
13223                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
13224                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
13225                 if (INTEL_INFO(dev)->gen <= 3) {
13226                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
13227                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
13228                 }
13229                 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
13230                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
13231                 if (INTEL_INFO(dev)->gen >= 4) {
13232                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
13233                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
13234                 }
13235
13236                 err_printf(m, "Cursor [%d]:\n", i);
13237                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
13238                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
13239                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
13240         }
13241
13242         for (i = 0; i < error->num_transcoders; i++) {
13243                 err_printf(m, "CPU transcoder: %c\n",
13244                            transcoder_name(error->transcoder[i].cpu_transcoder));
13245                 err_printf(m, "  Power: %s\n",
13246                            error->transcoder[i].power_domain_on ? "on" : "off");
13247                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
13248                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
13249                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
13250                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
13251                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
13252                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
13253                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
13254         }
13255 }